Fix followerDistributedDataStore tear down  [refs: 57/111357/1, master]
author      lubos-cicut <lubos.cicut@pantheon.tech>
Wed, 10 Apr 2024 08:48:50 +0000 (10:48 +0200)
committer   lubos-cicut <lubos.cicut@pantheon.tech>
Wed, 10 Apr 2024 08:48:50 +0000 (10:48 +0200)
Fix closing of followerDistributedDataStore in
DistributedDataStoreRemotingIntegrationTest#tearDown.

Change-Id: Iba35cae665f29a9da0430baff1f792191a4d1287
Signed-off-by: lubos-cicut <lubos.cicut@pantheon.tech>
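
The change itself only touches the test tear-down, but the pattern it describes is easy to illustrate. Below is a minimal, hedged sketch of that pattern: a JUnit 4 @After method that defensively closes the follower (and leader) data store and clears the field, so a failed or skipped test cannot leak the store into the next run. The AutoCloseable stand-ins, class name, and field handling here are assumptions for illustration only; the real DistributedDataStoreRemotingIntegrationTest uses the actual data store types and actor systems, and this is not the patch itself.

    import org.junit.After;
    import org.junit.Test;

    public class TearDownExampleTest {
        // Stand-ins for the follower/leader distributed data stores; the real test
        // holds concrete data store instances, not plain AutoCloseables.
        private AutoCloseable followerDistributedDataStore;
        private AutoCloseable leaderDistributedDataStore;

        @After
        public void tearDown() throws Exception {
            // Guard against tests that never created the store, close it, and clear
            // the field so it cannot be closed twice or leak into the next test.
            if (followerDistributedDataStore != null) {
                followerDistributedDataStore.close();
                followerDistributedDataStore = null;
            }
            if (leaderDistributedDataStore != null) {
                leaderDistributedDataStore.close();
                leaderDistributedDataStore = null;
            }
        }

        @Test
        public void exampleTest() {
            followerDistributedDataStore = () -> { /* release resources here */ };
            leaderDistributedDataStore = () -> { /* release resources here */ };
        }
    }

Null-checking before close() and clearing the field afterwards avoids NullPointerExceptions when a test never initialised the store and prevents a double close across test methods.
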
1082 files changed:
.readthedocs.yml [new file with mode: 0644]
akka/pom.xml
akka/repackaged-akka-jar/pom.xml
akka/repackaged-akka-jar/src/main/resources/actor_reference.conf
akka/repackaged-akka-jar/src/main/resources/remote_reference.conf
akka/repackaged-akka/pom.xml
artifacts/pom.xml
atomix-storage/LICENSE [new file with mode: 0644]
atomix-storage/pom.xml [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java [new file with mode: 0644]
atomix-storage/src/test/resources/logback.xml [new file with mode: 0644]
benchmark/api/pom.xml
benchmark/dsbenchmark/pom.xml
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java
benchmark/ntfbenchmark/pom.xml
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java
benchmark/pom.xml
benchmark/rpcbenchmark/pom.xml
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java
bundle-parent/pom.xml
docs/dev-guide.rst
docs/images/Transaction.jpg [deleted file]
docs/images/configuration.jpg [deleted file]
docs/pom.xml
features/features-controller-experimental/pom.xml
features/features-controller-testing/pom.xml
features/features-controller/pom.xml
features/odl-clustering-test-app/pom.xml
features/odl-clustering-test-app/src/main/feature/feature.xml
features/odl-controller-akka/pom.xml
features/odl-controller-akka/src/main/history/dependencies.xml
features/odl-controller-blueprint/pom.xml
features/odl-controller-blueprint/src/main/feature/feature.xml
features/odl-controller-broker-local/pom.xml
features/odl-controller-broker-local/src/main/feature/feature.xml
features/odl-controller-exp-netty-config/pom.xml [deleted file]
features/odl-controller-exp-netty-config/src/main/feature/feature.xml [deleted file]
features/odl-controller-mdsal-common/pom.xml
features/odl-controller-mdsal-common/src/main/feature/feature.xml
features/odl-controller-scala/pom.xml
features/odl-controller-scala/src/main/history/dependencies.xml
features/odl-jolokia/pom.xml
features/odl-mdsal-benchmark/pom.xml
features/odl-mdsal-broker/pom.xml
features/odl-mdsal-broker/src/main/feature/feature.xml
features/odl-mdsal-clustering-commons/pom.xml
features/odl-mdsal-clustering-commons/src/main/feature/feature.xml
features/odl-mdsal-distributed-datastore/pom.xml
features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml
features/odl-mdsal-remoterpc-connector/pom.xml
features/odl-toaster/pom.xml
features/odl-toaster/src/main/feature/feature.xml
features/pom.xml
features/single-feature-parent/pom.xml
jolokia/pom.xml
karaf/pom.xml
opendaylight/blueprint/pom.xml
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java
opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java
opendaylight/config/netty-event-executor-config/pom.xml [deleted file]
opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java [deleted file]
opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java [deleted file]
opendaylight/config/netty-threadgroup-config/pom.xml [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java [deleted file]
opendaylight/config/netty-timer-config/pom.xml [deleted file]
opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java [deleted file]
opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java [deleted file]
opendaylight/config/pom.xml [deleted file]
opendaylight/config/threadpool-config-api/pom.xml [deleted file]
opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java [deleted file]
opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java [deleted file]
opendaylight/config/threadpool-config-impl/pom.xml [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java [deleted file]
opendaylight/md-sal/cds-access-api/pom.xml
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilder.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendTypeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/MemberNameTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java
opendaylight/md-sal/cds-access-client/pom.xml
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/SavingClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java
opendaylight/md-sal/cds-dom-api/pom.xml
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java [deleted file]
opendaylight/md-sal/cds-mgmt-api/pom.xml
opendaylight/md-sal/eos-dom-akka/pom.xml
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/EOSMain.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java [moved from opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InitialCandidateSync.java with 58% similarity]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFinished.java [moved from opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/package-info.java with 67% similarity]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java
opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf
opendaylight/md-sal/mdsal-it-base/pom.xml
opendaylight/md-sal/mdsal-it-parent/pom.xml
opendaylight/md-sal/parent/pom.xml
opendaylight/md-sal/pom.xml
opendaylight/md-sal/sal-akka-raft-example/pom.xml
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java
opendaylight/md-sal/sal-akka-raft/pom.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java [deleted file]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java [deleted file]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java [moved from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/IdentifiablePayload.java with 80% similarity]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/PersistentPayload.java [moved from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java with 88% similarity]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java
opendaylight/md-sal/sal-akka-segmented-journal/pom.xml
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java [deleted file]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java
opendaylight/md-sal/sal-binding-it/pom.xml
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java
opendaylight/md-sal/sal-cluster-admin-api/pom.xml
opendaylight/md-sal/sal-cluster-admin-impl/pom.xml
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java
opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java
opendaylight/md-sal/sal-clustering-commons/pom.xml
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java
opendaylight/md-sal/sal-clustering-config/pom.xml
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
opendaylight/md-sal/sal-common-util/pom.xml
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java
opendaylight/md-sal/sal-distributed-datastore/pom.xml
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendHistoryMetadataBuilder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeListenerInfoMXBeanImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTransactionParent.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CreateLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DataTreeCandidateInputOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DatastoreSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DeletedDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DisableTrackingPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendClientMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendShardDataTreeSnapshotMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MetadataShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ModifiedDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PayloadVersion.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardManagerSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardSnapshotState.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/AbstractShardManagerCreator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/AtomicShardContextProvider.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardInformation.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/AbstractBatchedModificationsCursor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtils.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/DataTreeModificationOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/NormalizedNodeAggregator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/PruningDataTreeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmap.java
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBrokerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandleTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolverTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/TestUtils.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/TransactionTester.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractDistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupportTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextContextPropertiesUpdaterTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospectorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreSnapshotRestoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapperTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapperTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreWithSegmentedJournalIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListenerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/JsonExportTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/OperationCallback.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationCallback.java with 62% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeMocking.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinatorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTestKit.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestDistributedDataStore.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java with 82% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java with 90% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallbackTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChainTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/MergeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/WriteModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CreateLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendShardDataTreeSnapshotMetadataTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardDataTreeSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardManagerSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardSnapshotStateTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtilsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/NormalizedNodeAggregatorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/PruningDataTreeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/CarsModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/CompositeModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/PeopleModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/TestModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedJournalExport.json
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/segmented.conf
opendaylight/md-sal/sal-dummy-distributed-datastore/pom.xml
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/java/org/opendaylight/controller/dummy/datastore/DummyShardManager.java
opendaylight/md-sal/sal-remoterpc-connector/pom.xml
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OSGiRemoteOpsProvider.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsInvoker.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsManager.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsRegistrar.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionFuture.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMRpcFuture.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementation.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcErrorsException.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteAction.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteRpc.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/RpcResponse.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRoutingTable.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreAccess.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreActor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/AbstractRegistryMXBean.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImpl.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/AbstractOpsTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsBrokerTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsListenerTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsRegistrarTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsImplementationTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcErrorsExceptionTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteOpsTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/OpsResponseTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistryTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImplTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteRpcRegistryMXBeanImplTest.java
opendaylight/md-sal/sal-test-model/pom.xml
opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/pom.xml
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/DefaultInstanceIdentifierSupport.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterCommitCohortCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterLoggingDtclCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterOwnershipCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StopStressTestCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StressTestCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterCommitCohortCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterLoggingDtclsCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterOwnershipCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/purchase/BuyCarCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/AddShardReplicaCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/CheckPublishNotificationsCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/IsClientAbortedCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterBoundConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterDefaultConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterFlappingSingletonCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterSingletonConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RemoveShardReplicaCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/ShutdownShardReplicaCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/StartPublishNotificationsCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDdtlCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDtclCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeYnlCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterBoundConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterDefaultConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterFlappingSingletonCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterSingletonConstantCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDdtlCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDtclCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeYnlCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/WriteTransactionsCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/people/AddPersonCommand.java
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/rpc/test/BasicGlobalCommand.java
opendaylight/md-sal/samples/clustering-test-app/model/pom.xml
opendaylight/md-sal/samples/clustering-test-app/pom.xml
opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/CarBoughtListener.java [moved from opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java with 58% similarity]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/AddPersonImpl.java [moved from opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java with 62% similarity]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/BasicRpcTestProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarDataTreeChangeListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarEntryDataTreeCommitCohort.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/MdsalLowLevelTestProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FlappingSingletonService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/GetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PublishNotificationsTask.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/RoutedGetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/SingletonGetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/WriteTransactionsHandler.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/YnlListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml [deleted file]
opendaylight/md-sal/samples/pom.xml
opendaylight/md-sal/samples/toaster-consumer/pom.xml
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/sample/kitchen/api/KitchenService.java
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/sample/kitchen/impl/KitchenServiceImpl.java
opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml [deleted file]
opendaylight/md-sal/samples/toaster-it/pom.xml
opendaylight/md-sal/samples/toaster-it/src/test/java/org/opendaylight/controller/sample/toaster/it/ToasterTest.java
opendaylight/md-sal/samples/toaster-provider/pom.xml
opendaylight/md-sal/samples/toaster-provider/src/main/java/org/opendaylight/controller/sample/toaster/provider/OpendaylightToaster.java
opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml [deleted file]
opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang [deleted file]
opendaylight/md-sal/samples/toaster-provider/src/test/java/org/opendaylight/controller/sample/toaster/provider/OpenDaylightToasterTest.java
opendaylight/md-sal/samples/toaster/pom.xml
pom.xml

diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000..48b1206
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,21 @@
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+  jobs:
+    post_checkout:
+      - git fetch --unshallow || true
+
+sphinx:
+  configuration: docs/conf.py
+
+python:
+  install:
+    - requirements: docs/requirements.txt
index 29d8db82175bdf2f9799bec73a030e8506ea6379..afd11d74106310b8f6d9a518096119dd45203779 100644
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>akka-aggregator</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <properties>
index d525bc0aa66264146c2416e4ef13a7cb088d7272..6c62c5d247c286ead6f506280a94affee1c076cc 100644
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>repackaged-akka-jar</artifactId>
     <packaging>jar</packaging>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <name>${project.artifactId}</name>
 
     <properties>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-actor_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-actor-typed_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-cluster_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-cluster-typed_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-osgi_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-persistence_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-protobuf_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-remote_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-slf4j_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-stream_2.13</artifactId>
-            <version>2.6.18</version>
+            <version>2.6.21</version>
         </dependency>
     </dependencies>
 
                     </execution>
                 </executions>
             </plugin>
+            <plugin>
+                <artifactId>maven-source-plugin</artifactId>
+                <configuration>
+                    <!-- We handle this through shade plugin -->
+                    <skipSource>true</skipSource>
+                </configuration>
+            </plugin>
         </plugins>
     </build>
 </project>
index d0569ef3a69afb5a379c44793a1bb982392292f2..d41cb39ae4e039eb2c2ded848279ae55a02ef16c 100644
@@ -1307,4 +1307,45 @@ akka {
     #//#coordinated-shutdown-phases
   }
 
+  #//#circuit-breaker-default
+  # Configuration for circuit breakers created with the APIs accepting an id to
+  # identify or look up the circuit breaker.
+  # Note: Circuit breakers created without ids are not affected by this configuration.
+  # A child configuration section with the same name as the circuit breaker identifier
+  # will be used, with fallback to the `akka.circuit-breaker.default` section.
+  circuit-breaker {
+
+    # Default configuration that is used if a configuration section
+    # with the circuit breaker identifier is not defined.
+    default {
+      # Number of failures before opening the circuit.
+      max-failures = 10
+
+      # Duration of time after which to consider a call a failure.
+      call-timeout = 10s
+
+      # Duration of time in open state after which to attempt to close
+      # the circuit, by first entering the half-open state.
+      reset-timeout = 15s
+
+      # The upper bound of reset-timeout
+      max-reset-timeout = 36500d
+
+      # Exponential backoff
+      # For details see https://en.wikipedia.org/wiki/Exponential_backoff
+      exponential-backoff = 1.0
+
+      # Additional random delay based on this factor is added to backoff
+      # For example 0.2 adds up to 20% delay
+      # In order to skip this additional delay set as 0
+      random-factor = 0.0
+
+      # An allowlist of fqcn of Exceptions that the CircuitBreaker
+      # should not consider failures. By default all exceptions are
+      # considered failures.
+      exception-allowlist = []
+    }
+  }
+  #//#circuit-breaker-default
+
 }
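
The block above keys circuit-breaker settings by identifier: a child section of akka.circuit-breaker named after a breaker's id overrides the `default` section it falls back to, while breakers created without an id are unaffected, as the comments note. A minimal sketch of such an id-specific override (the identifier "shard-backend" is purely illustrative and not part of this change):

  akka.circuit-breaker {
    # hypothetical identifier; real ids come from the code that creates the breaker
    shard-backend {
      max-failures = 5
      call-timeout = 5s
      reset-timeout = 30s
      # all other settings fall back to akka.circuit-breaker.default
    }
  }
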
index 5f569d5d985f7d2ee455d44f05788256720c1e06..a30bce7190f20664ec5b88de2609b4a4209359ff 100644
@@ -268,465 +268,467 @@ akka {
 
 akka {
 
-  remote.classic {
-    #//#classic
+  remote {
+  #//#classic
+    classic {
+
+      ### Configuration for classic remoting. Classic remoting is deprecated, use artery.
+
+
+      # If set to a nonempty string remoting will use the given dispatcher for
+      # its internal actors otherwise the default dispatcher is used. Please note
+      # that since remoting can load arbitrary 3rd party drivers (see
+      # "enabled-transport" and "adapters" entries) it is not guaranteed that
+      # every module will respect this setting.
+      use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+      # Settings for the failure detector to monitor connections.
+      # For TCP it is not important to have fast failure detection, since
+      # most connection failures are captured by TCP itself.
+      # The default DeadlineFailureDetector will trigger if there are no heartbeats within
+      # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
+      # with the default settings.
+      transport-failure-detector {
+
+        # FQCN of the failure detector implementation.
+        # It must implement akka.remote.FailureDetector and have
+        # a public constructor with a com.typesafe.config.Config and
+        # akka.actor.EventStream parameter.
+        implementation-class = "akka.remote.DeadlineFailureDetector"
+
+        # How often keep-alive heartbeat messages should be sent to each connection.
+        heartbeat-interval = 4 s
+
+        # Number of potentially lost/delayed heartbeats that will be
+        # accepted before considering it to be an anomaly.
+        # A margin to the `heartbeat-interval` is important to be able to survive sudden,
+        # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
+        # network drop.
+        acceptable-heartbeat-pause = 120 s
+      }
 
-    ### Configuration for classic remoting. Classic remoting is deprecated, use artery.
 
+      # Timeout after which the startup of the remoting subsystem is considered
+      # to be failed. Increase this value if your transport drivers (see the
+      # enabled-transports section) need longer time to be loaded.
+      startup-timeout = 10 s
 
-    # If set to a nonempty string remoting will use the given dispatcher for
-    # its internal actors otherwise the default dispatcher is used. Please note
-    # that since remoting can load arbitrary 3rd party drivers (see
-    # "enabled-transport" and "adapters" entries) it is not guaranteed that
-    # every module will respect this setting.
-    use-dispatcher = "akka.remote.default-remote-dispatcher"
+      # Timeout after which the graceful shutdown of the remoting subsystem is
+      # considered to be failed. After the timeout the remoting system is
+      # forcefully shut down. Increase this value if your transport drivers
+      # (see the enabled-transports section) need longer time to stop properly.
+      shutdown-timeout = 10 s
 
-    # Settings for the failure detector to monitor connections.
-    # For TCP it is not important to have fast failure detection, since
-    # most connection failures are captured by TCP itself.
-    # The default DeadlineFailureDetector will trigger if there are no heartbeats within
-    # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
-    # with the default settings.
-    transport-failure-detector {
+      # Before shutting down the drivers, the remoting subsystem attempts to flush
+      # all pending writes. This setting controls the maximum time the remoting is
+      # willing to wait before moving on to shut down the drivers.
+      flush-wait-on-shutdown = 2 s
 
-      # FQCN of the failure detector implementation.
-      # It must implement akka.remote.FailureDetector and have
-      # a public constructor with a com.typesafe.config.Config and
-      # akka.actor.EventStream parameter.
-      implementation-class = "akka.remote.DeadlineFailureDetector"
+      # Reuse inbound connections for outbound messages
+      use-passive-connections = on
 
-      # How often keep-alive heartbeat messages should be sent to each connection.
-      heartbeat-interval = 4 s
+      # Controls the backoff interval after a refused write is reattempted.
+      # (Transports may refuse writes if their internal buffer is full)
+      backoff-interval = 5 ms
 
-      # Number of potentially lost/delayed heartbeats that will be
-      # accepted before considering it to be an anomaly.
-      # A margin to the `heartbeat-interval` is important to be able to survive sudden,
-      # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
-      # network drop.
-      acceptable-heartbeat-pause = 120 s
-    }
+      # Acknowledgment timeout of management commands sent to the transport stack.
+      command-ack-timeout = 30 s
 
+      # The timeout for outbound associations to perform the handshake.
+      # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl
+      # the configured connection-timeout for the transport will be used instead.
+      handshake-timeout = 15 s
 
-    # Timeout after which the startup of the remoting subsystem is considered
-    # to be failed. Increase this value if your transport drivers (see the
-    # enabled-transports section) need longer time to be loaded.
-    startup-timeout = 10 s
-
-    # Timeout after which the graceful shutdown of the remoting subsystem is
-    # considered to be failed. After the timeout the remoting system is
-    # forcefully shut down. Increase this value if your transport drivers
-    # (see the enabled-transports section) need longer time to stop properly.
-    shutdown-timeout = 10 s
-
-    # Before shutting down the drivers, the remoting subsystem attempts to flush
-    # all pending writes. This setting controls the maximum time the remoting is
-    # willing to wait before moving on to shut down the drivers.
-    flush-wait-on-shutdown = 2 s
-
-    # Reuse inbound connections for outbound messages
-    use-passive-connections = on
-
-    # Controls the backoff interval after a refused write is reattempted.
-    # (Transports may refuse writes if their internal buffer is full)
-    backoff-interval = 5 ms
-
-    # Acknowledgment timeout of management commands sent to the transport stack.
-    command-ack-timeout = 30 s
-
-    # The timeout for outbound associations to perform the handshake.
-    # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl
-    # the configured connection-timeout for the transport will be used instead.
-    handshake-timeout = 15 s
-
-    ### Security settings
-
-    # Enable untrusted mode for full security of server managed actors, prevents
-    # system messages to be sent by clients, e.g. messages like 'Create',
-    # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc.
-    untrusted-mode = off
-
-    # When 'untrusted-mode=on' inbound actor selections are by default discarded.
-    # Actors with paths defined in this list are granted permission to receive actor
-    # selections messages.
-    # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
-    trusted-selection-paths = []
-
-    ### Logging
-
-    # If this is "on", Akka will log all inbound messages at DEBUG level,
-    # if off then they are not logged
-    log-received-messages = off
-
-    # If this is "on", Akka will log all outbound messages at DEBUG level,
-    # if off then they are not logged
-    log-sent-messages = off
-
-    # Sets the log granularity level at which Akka logs remoting events. This setting
-    # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility
-    # reasons the setting "on" will default to "debug" level. Please note that the effective
-    # logging level is still determined by the global logging level of the actor system:
-    # for example debug level remoting events will be only logged if the system
-    # is running with debug level logging.
-    # Failures to deserialize received messages also fall under this flag.
-    log-remote-lifecycle-events = on
-
-    # Logging of message types with payload size in bytes larger than
-    # this value. Maximum detected size per message type is logged once,
-    # with an increase threshold of 10%.
-    # By default this feature is turned off. Activate it by setting the property to
-    # a value in bytes, such as 1000b. Note that for all messages larger than this
-    # limit there will be extra performance and scalability cost.
-    log-frame-size-exceeding = off
-
-    # Log warning if the number of messages in the backoff buffer in the endpoint
-    # writer exceeds this limit. It can be disabled by setting the value to off.
-    log-buffer-size-exceeding = 50000
-
-    # After failing to establish an outbound connection, the remoting will mark the
-    # address as failed. This configuration option controls how much time should
-    # be elapsed before reattempting a new connection. While the address is
-    # gated, all messages sent to the address are delivered to dead-letters.
-    # Since this setting limits the rate of reconnects setting it to a
-    # very short interval (i.e. less than a second) may result in a storm of
-    # reconnect attempts.
-    retry-gate-closed-for = 5 s
-
-    # After catastrophic communication failures that result in the loss of system
-    # messages or after the remote DeathWatch triggers the remote system gets
-    # quarantined to prevent inconsistent behavior.
-    # This setting controls how long the Quarantine marker will be kept around
-    # before being removed to avoid long-term memory leaks.
-    # WARNING: DO NOT change this to a small value to re-enable communication with
-    # quarantined nodes. Such feature is not supported and any behavior between
-    # the affected systems after lifting the quarantine is undefined.
-    prune-quarantine-marker-after = 5 d
-
-    # If system messages have been exchanged between two systems (i.e. remote death
-    # watch or remote deployment has been used) a remote system will be marked as
-    # quarantined after the two systems have no active association, and no
-    # communication happens during the time configured here.
-    # The only purpose of this setting is to avoid storing system message redelivery
-    # data (sequence number state, etc.) for an undefined amount of time leading to long
-    # term memory leak. Instead, if a system has been gone for this period,
-    # or more exactly
-    # - there is no association between the two systems (TCP connection, if TCP transport is used)
-    # - neither side has been attempting to communicate with the other
-    # - there are no pending system messages to deliver
-    # for the amount of time configured here, the remote system will be quarantined and all state
-    # associated with it will be dropped.
-    #
-    # Maximum value depends on the scheduler's max limit (default 248 days) and if configured
-    # to a longer duration this feature will effectively be disabled. Setting the value to
-    # 'off' will also disable the feature. Note that if disabled there is a risk of a long
-    # term memory leak.
-    quarantine-after-silence = 2 d
-
-    # This setting defines the maximum number of unacknowledged system messages
-    # allowed for a remote system. If this limit is reached the remote system is
-    # declared to be dead and its UID marked as tainted.
-    system-message-buffer-size = 20000
-
-    # This setting defines the maximum idle time after an individual
-    # acknowledgement for system messages is sent. System message delivery
-    # is guaranteed by explicit acknowledgement messages. These acks are
-    # piggybacked on ordinary traffic messages. If no traffic is detected
-    # during the time period configured here, the remoting will send out
-    # an individual ack.
-    system-message-ack-piggyback-timeout = 0.3 s
-
-    # This setting defines the time after internal management signals
-    # between actors (used for DeathWatch and supervision) that have not been
-    # explicitly acknowledged or negatively acknowledged are resent.
-    # Messages that were negatively acknowledged are always immediately
-    # resent.
-    resend-interval = 2 s
-
-    # Maximum number of unacknowledged system messages that will be resent
-    # each 'resend-interval'. If you watch many (> 1000) remote actors you can
-    # increase this value to for example 600, but a too large limit (e.g. 10000)
-    # may flood the connection and might cause false failure detection to trigger.
-    # Test such a configuration by watching all actors at the same time and stop
-    # all watched actors at the same time.
-    resend-limit = 200
-
-    # WARNING: this setting should not be changed unless all of its consequences
-    # are properly understood which assumes experience with remoting internals
-    # or expert advice.
-    # This setting defines the time after redelivery attempts of internal management
-    # signals are stopped to a remote system that has been not confirmed to be alive by
-    # this system before.
-    initial-system-message-delivery-timeout = 3 m
-
-    ### Transports and adapters
-
-    # List of the transport drivers that will be loaded by the remoting.
-    # A list of fully qualified config paths must be provided where
-    # the given configuration path contains a transport-class key
-    # pointing to an implementation class of the Transport interface.
-    # If multiple transports are provided, the address of the first
-    # one will be used as a default address.
-    enabled-transports = ["akka.remote.classic.netty.tcp"]
-
-    # Transport drivers can be augmented with adapters by adding their
-    # name to the applied-adapters setting in the configuration of a
-    # transport. The available adapters should be configured in this
-    # section by providing a name, and the fully qualified name of
-    # their corresponding implementation. The class given here
-    # must implement akka.akka.remote.transport.TransportAdapterProvider
-    # and have public constructor without parameters.
-    adapters {
-      gremlin = "akka.remote.transport.FailureInjectorProvider"
-      trttl = "akka.remote.transport.ThrottlerProvider"
-    }
+      ### Security settings
 
-    ### Default configuration for the Netty based transport drivers
+      # Enable untrusted mode for full security of server managed actors, prevents
+      # system messages to be sent by clients, e.g. messages like 'Create',
+      # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc.
+      untrusted-mode = off
 
-    netty.tcp {
-      # The class given here must implement the akka.remote.transport.Transport
-      # interface and offer a public constructor which takes two arguments:
-      #  1) akka.actor.ExtendedActorSystem
-      #  2) com.typesafe.config.Config
-      transport-class = "akka.remote.transport.netty.NettyTransport"
+      # When 'untrusted-mode=on' inbound actor selections are by default discarded.
+      # Actors with paths defined in this list are granted permission to receive actor
+      # selections messages.
+      # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
+      trusted-selection-paths = []
 
-      # Transport drivers can be augmented with adapters by adding their
-      # name to the applied-adapters list. The last adapter in the
-      # list is the adapter immediately above the driver, while
-      # the first one is the top of the stack below the standard
-      # Akka protocol
-      applied-adapters = []
-
-      # The default remote server port clients should connect to.
-      # Default is 2552 (AKKA), use 0 if you want a random available port
-      # This port needs to be unique for each actor system on the same machine.
-      port = 2552
-
-      # The hostname or ip clients should connect to.
-      # InetAddress.getLocalHost.getHostAddress is used if empty
-      hostname = ""
-
-      # Use this setting to bind a network interface to a different port
-      # than remoting protocol expects messages at. This may be used
-      # when running akka nodes in a separated networks (under NATs or docker containers).
-      # Use 0 if you want a random available port. Examples:
-      #
-      # akka.remote.classic.netty.tcp.port = 2552
-      # akka.remote.classic.netty.tcp.bind-port = 2553
-      # Network interface will be bound to the 2553 port, but remoting protocol will
-      # expect messages sent to port 2552.
-      #
-      # akka.remote.classic.netty.tcp.port = 0
-      # akka.remote.classic.netty.tcp.bind-port = 0
-      # Network interface will be bound to a random port, and remoting protocol will
-      # expect messages sent to the bound port.
-      #
-      # akka.remote.classic.netty.tcp.port = 2552
-      # akka.remote.classic.netty.tcp.bind-port = 0
-      # Network interface will be bound to a random port, but remoting protocol will
-      # expect messages sent to port 2552.
-      #
-      # akka.remote.classic.netty.tcp.port = 0
-      # akka.remote.classic.netty.tcp.bind-port = 2553
-      # Network interface will be bound to the 2553 port, and remoting protocol will
-      # expect messages sent to the bound port.
-      #
-      # akka.remote.classic.netty.tcp.port = 2552
-      # akka.remote.classic.netty.tcp.bind-port = ""
-      # Network interface will be bound to the 2552 port, and remoting protocol will
-      # expect messages sent to the bound port.
+      ### Logging
+
+      # If this is "on", Akka will log all inbound messages at DEBUG level,
+      # if off then they are not logged
+      log-received-messages = off
+
+      # If this is "on", Akka will log all outbound messages at DEBUG level,
+      # if off then they are not logged
+      log-sent-messages = off
+
+      # Sets the log granularity level at which Akka logs remoting events. This setting
+      # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility
+      # reasons the setting "on" will default to "debug" level. Please note that the effective
+      # logging level is still determined by the global logging level of the actor system:
+      # for example debug level remoting events will be only logged if the system
+      # is running with debug level logging.
+      # Failures to deserialize received messages also fall under this flag.
+      log-remote-lifecycle-events = on
+
+      # Logging of message types with payload size in bytes larger than
+      # this value. Maximum detected size per message type is logged once,
+      # with an increase threshold of 10%.
+      # By default this feature is turned off. Activate it by setting the property to
+      # a value in bytes, such as 1000b. Note that for all messages larger than this
+      # limit there will be extra performance and scalability cost.
+      log-frame-size-exceeding = off
+
+      # Log warning if the number of messages in the backoff buffer in the endpoint
+      # writer exceeds this limit. It can be disabled by setting the value to off.
+      log-buffer-size-exceeding = 50000
+
+      # After failing to establish an outbound connection, the remoting will mark the
+      # address as failed. This configuration option controls how much time should
+      # be elapsed before reattempting a new connection. While the address is
+      # gated, all messages sent to the address are delivered to dead-letters.
+      # Since this setting limits the rate of reconnects setting it to a
+      # very short interval (i.e. less than a second) may result in a storm of
+      # reconnect attempts.
+      retry-gate-closed-for = 5 s
+
+      # After catastrophic communication failures that result in the loss of system
+      # messages or after the remote DeathWatch triggers the remote system gets
+      # quarantined to prevent inconsistent behavior.
+      # This setting controls how long the Quarantine marker will be kept around
+      # before being removed to avoid long-term memory leaks.
+      # WARNING: DO NOT change this to a small value to re-enable communication with
+      # quarantined nodes. Such feature is not supported and any behavior between
+      # the affected systems after lifting the quarantine is undefined.
+      prune-quarantine-marker-after = 5 d
+
+      # If system messages have been exchanged between two systems (i.e. remote death
+      # watch or remote deployment has been used) a remote system will be marked as
+      # quarantined after the two systems have no active association, and no
+      # communication happens during the time configured here.
+      # The only purpose of this setting is to avoid storing system message redelivery
+      # data (sequence number state, etc.) for an undefined amount of time leading to long
+      # term memory leak. Instead, if a system has been gone for this period,
+      # or more exactly
+      # - there is no association between the two systems (TCP connection, if TCP transport is used)
+      # - neither side has been attempting to communicate with the other
+      # - there are no pending system messages to deliver
+      # for the amount of time configured here, the remote system will be quarantined and all state
+      # associated with it will be dropped.
       #
-      # akka.remote.classic.netty.tcp.port if empty
-      bind-port = ""
-
-      # Use this setting to bind a network interface to a different hostname or ip
-      # than remoting protocol expects messages at.
-      # Use "0.0.0.0" to bind to all interfaces.
-      # akka.remote.classic.netty.tcp.hostname if empty
-      bind-hostname = ""
-
-      # Enables SSL support on this transport
-      enable-ssl = false
-
-      # Sets the connectTimeoutMillis of all outbound connections,
-      # i.e. how long a connect may take until it is timed out
-      connection-timeout = 15 s
-
-      # If set to "<id.of.dispatcher>" then the specified dispatcher
-      # will be used to accept inbound connections, and perform IO. If "" then
-      # dedicated threads will be used.
-      # Please note that the Netty driver only uses this configuration and does
-      # not read the "akka.remote.use-dispatcher" entry. Instead it has to be
-      # configured manually to point to the same dispatcher if needed.
-      use-dispatcher-for-io = ""
-
-      # Sets the high water mark for the in and outbound sockets,
-      # set to 0b for platform default
-      write-buffer-high-water-mark = 0b
-
-      # Sets the low water mark for the in and outbound sockets,
-      # set to 0b for platform default
-      write-buffer-low-water-mark = 0b
-
-      # Sets the send buffer size of the Sockets,
-      # set to 0b for platform default
-      send-buffer-size = 256000b
-
-      # Sets the receive buffer size of the Sockets,
-      # set to 0b for platform default
-      receive-buffer-size = 256000b
-
-      # Maximum message size the transport will accept, but at least
-      # 32000 bytes.
-      # Please note that UDP does not support arbitrary large datagrams,
-      # so this setting has to be chosen carefully when using UDP.
-      # Both send-buffer-size and receive-buffer-size settings has to
-      # be adjusted to be able to buffer messages of maximum size.
-      maximum-frame-size = 128000b
-
-      # Sets the size of the connection backlog
-      backlog = 4096
-
-      # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm
-      tcp-nodelay = on
-
-      # Enables TCP Keepalive, subject to the O/S kernel’s configuration
-      tcp-keepalive = on
-
-      # Enables SO_REUSEADDR, which determines when an ActorSystem can open
-      # the specified listen port (the meaning differs between *nix and Windows)
-      # Valid values are "on", "off" and "off-for-windows"
-      # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378
-      # "off-for-windows" of course means that it's "on" for all other platforms
-      tcp-reuse-addr = off-for-windows
-
-      # Used to configure the number of I/O worker threads on server sockets
-      server-socket-worker-pool {
-        # Min number of threads to cap factor-based number to
-        pool-size-min = 2
-
-        # The pool size factor is used to determine thread pool size
-        # using the following formula: ceil(available processors * factor).
-        # Resulting size is then bounded by the pool-size-min and
-        # pool-size-max values.
-        pool-size-factor = 1.0
-
-        # Max number of threads to cap factor-based number to
-        pool-size-max = 2
+      # Maximum value depends on the scheduler's max limit (default 248 days); if configured
+      # to a longer duration this feature will effectively be disabled. Setting the value to
+      # 'off' will also disable the feature. Note that if disabled there is a risk of a
+      # long-term memory leak.
+      quarantine-after-silence = 2 d
+
+      # This setting defines the maximum number of unacknowledged system messages
+      # allowed for a remote system. If this limit is reached the remote system is
+      # declared to be dead and its UID marked as tainted.
+      system-message-buffer-size = 20000
+
+      # This setting defines the maximum idle time after an individual
+      # acknowledgement for system messages is sent. System message delivery
+      # is guaranteed by explicit acknowledgement messages. These acks are
+      # piggybacked on ordinary traffic messages. If no traffic is detected
+      # during the time period configured here, the remoting will send out
+      # an individual ack.
+      system-message-ack-piggyback-timeout = 0.3 s
+
+      # This setting defines the time after which internal management signals
+      # between actors (used for DeathWatch and supervision) that have not been
+      # explicitly acknowledged or negatively acknowledged are resent.
+      # Messages that were negatively acknowledged are always immediately
+      # resent.
+      resend-interval = 2 s
+
+      # Maximum number of unacknowledged system messages that will be resent
+      # each 'resend-interval'. If you watch many (> 1000) remote actors you can
+      # increase this value to, for example, 600, but too large a limit (e.g. 10000)
+      # may flood the connection and might cause false failure detection to trigger.
+      # Test such a configuration by watching all actors at the same time and stopping
+      # all watched actors at the same time.
+      resend-limit = 200
+
+      # WARNING: this setting should not be changed unless all of its consequences
+      # are properly understood, which assumes experience with remoting internals
+      # or expert advice.
+      # This setting defines the time after which redelivery attempts of internal management
+      # signals to a remote system that has not previously been confirmed to be alive by
+      # this system are stopped.
+      initial-system-message-delivery-timeout = 3 m
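
A minimal sketch, assuming the akka.remote.classic prefix implied by the nesting above, of overriding a couple of these control settings from application code with the Typesafe Config API; the class name and chosen values are illustrative only:

    import akka.actor.ActorSystem;
    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Illustrative only: widen the quarantine window and raise the resend cap to 600,
    // per the guidance above for deployments watching many remote actors.
    public final class RemotingTuningSketch {
        public static void main(final String[] args) {
            final Config overrides = ConfigFactory.parseString(
                "akka.remote.classic.quarantine-after-silence = 5 d\n"
                + "akka.remote.classic.resend-limit = 600\n");
            // Programmatic overrides take precedence over the reference defaults above.
            final ActorSystem system = ActorSystem.create("tuning-sketch",
                overrides.withFallback(ConfigFactory.load()));
            system.terminate();
        }
    }
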
+
+      ### Transports and adapters
+
+      # List of the transport drivers that will be loaded by the remoting.
+      # A list of fully qualified config paths must be provided where
+      # the given configuration path contains a transport-class key
+      # pointing to an implementation class of the Transport interface.
+      # If multiple transports are provided, the address of the first
+      # one will be used as a default address.
+      enabled-transports = ["akka.remote.classic.netty.tcp"]
+
+      # Transport drivers can be augmented with adapters by adding their
+      # name to the applied-adapters setting in the configuration of a
+      # transport. The available adapters should be configured in this
+      # section by providing a name, and the fully qualified name of
+      # their corresponding implementation. The class given here
+      # must implement akka.remote.transport.TransportAdapterProvider
+      # and have a public constructor without parameters.
+      adapters {
+        gremlin = "akka.remote.transport.FailureInjectorProvider"
+        trttl = "akka.remote.transport.ThrottlerProvider"
       }
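
To illustrate how a declared adapter is applied, a hedged sketch (class name illustrative; the netty.tcp path is the transport named in enabled-transports above) that stacks the trttl throttler on the classic TCP driver via its applied-adapters list:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Illustrative only: the last adapter in applied-adapters sits directly above the driver.
    public final class AdapterSketch {
        public static void main(final String[] args) {
            final Config config = ConfigFactory.parseString(
                "akka.remote.classic.netty.tcp.applied-adapters = [\"trttl\"]")
                .withFallback(ConfigFactory.load());
            System.out.println(config.getStringList("akka.remote.classic.netty.tcp.applied-adapters"));
        }
    }
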
 
-      # Used to configure the number of I/O worker threads on client sockets
-      client-socket-worker-pool {
-        # Min number of threads to cap factor-based number to
-        pool-size-min = 2
+      ### Default configuration for the Netty based transport drivers
 
-        # The pool size factor is used to determine thread pool size
-        # using the following formula: ceil(available processors * factor).
-        # Resulting size is then bounded by the pool-size-min and
-        # pool-size-max values.
-        pool-size-factor = 1.0
+      netty.tcp {
+        # The class given here must implement the akka.remote.transport.Transport
+        # interface and offer a public constructor which takes two arguments:
+        #  1) akka.actor.ExtendedActorSystem
+        #  2) com.typesafe.config.Config
+        transport-class = "akka.remote.transport.netty.NettyTransport"
 
-        # Max number of threads to cap factor-based number to
-        pool-size-max = 2
-      }
+        # Transport drivers can be augmented with adapters by adding their
+        # name to the applied-adapters list. The last adapter in the
+        # list is the adapter immediately above the driver, while
+        # the first one is the top of the stack below the standard
+        # Akka protocol
+        applied-adapters = []
 
+        # The default remote server port clients should connect to.
+        # Default is 2552 (AKKA), use 0 if you want a random available port
+        # This port needs to be unique for each actor system on the same machine.
+        port = 2552
 
-    }
+        # The hostname or ip clients should connect to.
+        # InetAddress.getLocalHost.getHostAddress is used if empty
+        hostname = ""
 
-    netty.ssl = ${akka.remote.classic.netty.tcp}
-    netty.ssl = {
-      # Enable SSL/TLS encryption.
-      # This must be enabled on both the client and server to work.
-      enable-ssl = true
-
-      # Factory of SSLEngine.
-      # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public
-      # constructor with an ActorSystem parameter.
-      # The default ConfigSSLEngineProvider is configured by properties in section
-      # akka.remote.classic.netty.ssl.security
-      #
-      # The SSLEngineProvider can also be defined via ActorSystemSetup with
-      # SSLEngineProviderSetup  when starting the ActorSystem. That is useful when
-      # the SSLEngineProvider implementation requires other external constructor
-      # parameters or is created before the ActorSystem is created.
-      # If such SSLEngineProviderSetup is defined this config property is not used.
-      ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider
-
-      security {
-        # This is the Java Key Store used by the server connection
-        key-store = "keystore"
-
-        # This password is used for decrypting the key store
-        key-store-password = "changeme"
-
-        # This password is used for decrypting the key
-        key-password = "changeme"
-
-        # This is the Java Key Store used by the client connection
-        trust-store = "truststore"
-
-        # This password is used for decrypting the trust store
-        trust-store-password = "changeme"
-
-        # Protocol to use for SSL encryption.
-        protocol = "TLSv1.2"
-
-        # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", 
-        #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-        #   "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
-        #   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
-        # When doing rolling upgrades, make sure to include both the algorithm used 
-        # by old nodes and the preferred algorithm.
-        # If you use a JDK 8 prior to 8u161 you need to install
-        # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
-        # More info here:
-        # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
-        enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
-          "TLS_RSA_WITH_AES_128_CBC_SHA"]
-
-        # There are two options, and the default SecureRandom is recommended:
-        # "" or "SecureRandom" => (default)
-        # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+        # Use this setting to bind a network interface to a different port
+        # than the remoting protocol expects messages at. This may be used
+        # when running akka nodes in separated networks (behind NATs or in docker containers).
+        # Use 0 if you want a random available port. Examples:
         #
-        # Setting a value here may require you to supply the appropriate cipher
-        # suite (see enabled-algorithms section above)
-        random-number-generator = ""
-
-        # Require mutual authentication between TLS peers
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = 2553
+        # Network interface will be bound to the 2553 port, but remoting protocol will
+        # expect messages sent to port 2552.
+        #
+        # akka.remote.classic.netty.tcp.port = 0
+        # akka.remote.classic.netty.tcp.bind-port = 0
+        # Network interface will be bound to a random port, and remoting protocol will
+        # expect messages sent to the bound port.
         #
-        # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
-        # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
-        # the passive side will also request and verify a certificate from the connecting peer.
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = 0
+        # Network interface will be bound to a random port, but remoting protocol will
+        # expect messages sent to port 2552.
         #
-        # To prevent man-in-the-middle attacks this setting is enabled by default.
+        # akka.remote.classic.netty.tcp.port = 0
+        # akka.remote.classic.netty.tcp.bind-port = 2553
+        # Network interface will be bound to the 2553 port, and remoting protocol will
+        # expect messages sent to the bound port.
         #
-        # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that
-        # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting
-        # connection will not send over certificates even if asked.
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = ""
+        # Network interface will be bound to the 2552 port, and remoting protocol will
+        # expect messages sent to the bound port.
         #
-        # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side)
-        # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of
-        # Akka < 2.4.12 can therefore work like this:
-        #   - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off"
-        #   - then switch this flag to "on" and do again a rolling upgrade of all nodes
-        # The first step ensures that all nodes will send over a certificate when asked to. The second
-        # step will ensure that all nodes finally enforce the secure checking of client certificates.
-        require-mutual-authentication = on
+        # akka.remote.classic.netty.tcp.port if empty
+        bind-port = ""
+
+        # Use this setting to bind a network interface to a different hostname or ip
+        # than the remoting protocol expects messages at.
+        # Use "0.0.0.0" to bind to all interfaces.
+        # akka.remote.classic.netty.tcp.hostname if empty
+        bind-hostname = ""
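
A hedged sketch of the third scenario above (advertised port 2552, random bound port); the class name is illustrative and the configuration keys are exactly those shown in the comments:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Illustrative only: advertise port 2552 while binding the socket to a random port,
    // e.g. when running behind a NAT or inside a container.
    public final class BindAddressSketch {
        public static void main(final String[] args) {
            final Config config = ConfigFactory.parseString(
                "akka.remote.classic.netty.tcp.port = 2552\n"
                + "akka.remote.classic.netty.tcp.bind-port = 0\n"
                + "akka.remote.classic.netty.tcp.bind-hostname = \"0.0.0.0\"\n")
                .withFallback(ConfigFactory.load());
            System.out.println(config.getInt("akka.remote.classic.netty.tcp.port"));
        }
    }
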
+
+        # Enables SSL support on this transport
+        enable-ssl = false
+
+        # Sets the connectTimeoutMillis of all outbound connections,
+        # i.e. how long a connect may take until it is timed out
+        connection-timeout = 15 s
+
+        # If set to "<id.of.dispatcher>" then the specified dispatcher
+        # will be used to accept inbound connections, and perform IO. If "" then
+        # dedicated threads will be used.
+        # Please note that the Netty driver only uses this configuration and does
+        # not read the "akka.remote.use-dispatcher" entry. Instead it has to be
+        # configured manually to point to the same dispatcher if needed.
+        use-dispatcher-for-io = ""
+
+        # Sets the high water mark for the in and outbound sockets,
+        # set to 0b for platform default
+        write-buffer-high-water-mark = 0b
+
+        # Sets the low water mark for the in and outbound sockets,
+        # set to 0b for platform default
+        write-buffer-low-water-mark = 0b
+
+        # Sets the send buffer size of the Sockets,
+        # set to 0b for platform default
+        send-buffer-size = 256000b
+
+        # Sets the receive buffer size of the Sockets,
+        # set to 0b for platform default
+        receive-buffer-size = 256000b
+
+        # Maximum message size the transport will accept, but at least
+        # 32000 bytes.
+        # Please note that UDP does not support arbitrarily large datagrams,
+        # so this setting has to be chosen carefully when using UDP.
+        # Both the send-buffer-size and receive-buffer-size settings have to
+        # be adjusted to be able to buffer messages of maximum size.
+        maximum-frame-size = 128000b
+
+        # Sets the size of the connection backlog
+        backlog = 4096
+
+        # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm
+        tcp-nodelay = on
+
+        # Enables TCP Keepalive, subject to the O/S kernel’s configuration
+        tcp-keepalive = on
+
+        # Enables SO_REUSEADDR, which determines when an ActorSystem can open
+        # the specified listen port (the meaning differs between *nix and Windows)
+        # Valid values are "on", "off" and "off-for-windows"
+        # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378
+        # "off-for-windows" of course means that it's "on" for all other platforms
+        tcp-reuse-addr = off-for-windows
+
+        # Used to configure the number of I/O worker threads on server sockets
+        server-socket-worker-pool {
+          # Min number of threads to cap factor-based number to
+          pool-size-min = 2
+
+          # The pool size factor is used to determine thread pool size
+          # using the following formula: ceil(available processors * factor).
+          # Resulting size is then bounded by the pool-size-min and
+          # pool-size-max values.
+          pool-size-factor = 1.0
+
+          # Max number of threads to cap factor-based number to
+          pool-size-max = 2
+        }
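
As a worked example of the pool sizing formula described above (the processor count is assumed for illustration):

    // Illustrative only: ceil(available processors * factor), clamped by pool-size-min/max.
    public final class WorkerPoolSizeSketch {
        public static void main(final String[] args) {
            final int availableProcessors = 8;   // assumed host
            final double poolSizeFactor = 1.0;   // pool-size-factor above
            final int poolSizeMin = 2;           // pool-size-min above
            final int poolSizeMax = 2;           // pool-size-max above

            final int factorBased = (int) Math.ceil(availableProcessors * poolSizeFactor);
            final int poolSize = Math.max(poolSizeMin, Math.min(factorBased, poolSizeMax));
            System.out.println(poolSize);        // ceil(8 * 1.0) = 8, clamped to 2
        }
    }
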
+
+        # Used to configure the number of I/O worker threads on client sockets
+        client-socket-worker-pool {
+          # Min number of threads to cap factor-based number to
+          pool-size-min = 2
+
+          # The pool size factor is used to determine thread pool size
+          # using the following formula: ceil(available processors * factor).
+          # Resulting size is then bounded by the pool-size-min and
+          # pool-size-max values.
+          pool-size-factor = 1.0
+
+          # Max number of threads to cap factor-based number to
+          pool-size-max = 2
+        }
+
+
       }
-    }
 
-    ### Default configuration for the failure injector transport adapter
+      netty.ssl = ${akka.remote.classic.netty.tcp}
+      netty.ssl = {
+        # Enable SSL/TLS encryption.
+        # This must be enabled on both the client and server to work.
+        enable-ssl = true
 
-    gremlin {
-      # Enable debug logging of the failure injector transport adapter
-      debug = off
-    }
+        # Factory of SSLEngine.
+        # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public
+        # constructor with an ActorSystem parameter.
+        # The default ConfigSSLEngineProvider is configured by properties in section
+        # akka.remote.classic.netty.ssl.security
+        #
+        # The SSLEngineProvider can also be defined via ActorSystemSetup with
+          # SSLEngineProviderSetup when starting the ActorSystem. That is useful when
+        # the SSLEngineProvider implementation requires other external constructor
+        # parameters or is created before the ActorSystem is created.
+        # If such SSLEngineProviderSetup is defined this config property is not used.
+        ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider
+
+        security {
+          # This is the Java Key Store used by the server connection
+          key-store = "keystore"
 
-    backoff-remote-dispatcher {
-      type = Dispatcher
-      executor = "fork-join-executor"
-      fork-join-executor {
-        # Min number of threads to cap factor-based parallelism number to
-        parallelism-min = 2
-        parallelism-max = 2
+          # This password is used for decrypting the key store
+          key-store-password = "changeme"
+
+          # This password is used for decrypting the key
+          key-password = "changeme"
+
+          # This is the Java Key Store used by the client connection
+          trust-store = "truststore"
+
+          # This password is used for decrypting the trust store
+          trust-store-password = "changeme"
+
+          # Protocol to use for SSL encryption.
+          protocol = "TLSv1.2"
+
+          # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", 
+          #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+          #   "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+          #   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+          # When doing rolling upgrades, make sure to include both the algorithm used 
+          # by old nodes and the preferred algorithm.
+          # If you use a JDK 8 prior to 8u161 you need to install
+          # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+          # More info here:
+          # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+          enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+            "TLS_RSA_WITH_AES_128_CBC_SHA"]
+
+          # There are two options, and the default SecureRandom is recommended:
+          # "" or "SecureRandom" => (default)
+          # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+          #
+          # Setting a value here may require you to supply the appropriate cipher
+          # suite (see enabled-algorithms section above)
+          random-number-generator = ""
+
+          # Require mutual authentication between TLS peers
+          #
+          # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
+          # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
+          # the passive side will also request and verify a certificate from the connecting peer.
+          #
+          # To prevent man-in-the-middle attacks this setting is enabled by default.
+          #
+          # Note: Nodes that have this setting set to 'on' might not be able to receive messages from nodes that
+          # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting
+          # connection will not send over certificates even if asked.
+          #
+          # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side)
+          # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of
+          # Akka < 2.4.12 can therefore work like this:
+          #   - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off"
+          #   - then switch this flag to "on" and do again a rolling upgrade of all nodes
+          # The first step ensures that all nodes will send over a certificate when asked to. The second
+          # step will ensure that all nodes finally enforce the secure checking of client certificates.
+          require-mutual-authentication = on
+        }
+      }
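
A hedged sketch of enabling this SSL transport: the enabled-transports key mirrors the setting earlier in this file, while the keystore paths and passwords are placeholders only:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Illustrative only: switch classic remoting to netty.ssl and point it at local key material.
    public final class SslTransportSketch {
        public static void main(final String[] args) {
            final Config config = ConfigFactory.parseString(
                "akka.remote.classic.enabled-transports = [\"akka.remote.classic.netty.ssl\"]\n"
                + "akka.remote.classic.netty.ssl.security.key-store = \"/path/to/keystore.jks\"\n"
                + "akka.remote.classic.netty.ssl.security.key-store-password = \"changeme\"\n"
                + "akka.remote.classic.netty.ssl.security.trust-store = \"/path/to/truststore.jks\"\n"
                + "akka.remote.classic.netty.ssl.security.trust-store-password = \"changeme\"\n")
                .withFallback(ConfigFactory.load());
            System.out.println(config.getString("akka.remote.classic.netty.ssl.security.key-store"));
        }
    }
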
+
+      ### Default configuration for the failure injector transport adapter
+
+      gremlin {
+        # Enable debug logging of the failure injector transport adapter
+        debug = off
+      }
+
+      backoff-remote-dispatcher {
+        type = Dispatcher
+        executor = "fork-join-executor"
+        fork-join-executor {
+          # Min number of threads to cap factor-based parallelism number to
+          parallelism-min = 2
+          parallelism-max = 2
+        }
       }
     }
   }
index 50109a16308a613ecbe216c40ae6cd15b9103a40..cc222188b55f9b8d12988b0ea9314409c978d864 100644 (file)
@@ -13,7 +13,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>bundle-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../bundle-parent</relativePath>
     </parent>
 
                 <extensions>true</extensions>
                 <configuration>
                     <instructions>
+                        <Automatic-Module-Name>org.opendaylight.controller.repackaged.akka</Automatic-Module-Name>
                         <Export-Package>
                             akka.*,
                             com.typesafe.sslconfig.akka.*,
index 65d5b0d3ce2af3f660cb97b0251b03b4d719d828..79e8d88f08ab0a1c147aa77d8801def96d6ac31b 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>controller-artifacts</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <dependencyManagement>
                 <version>${project.version}</version>
             </dependency>
 
+            <!-- Atomix -->
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>atomix-storage</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+
             <!-- Core API/implementation -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
                 <classifier>features</classifier>
                 <type>xml</type>
             </dependency>
-
-            <!-- Config remnants -->
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-event-executor-config</artifactId>
-                <version>0.16.0-SNAPSHOT</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-threadgroup-config</artifactId>
-                <version>0.16.0-SNAPSHOT</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-timer-config</artifactId>
-                <version>0.16.0-SNAPSHOT</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>threadpool-config-api</artifactId>
-                <version>0.16.0-SNAPSHOT</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>threadpool-config-impl</artifactId>
-                <version>0.16.0-SNAPSHOT</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>odl-controller-exp-netty-config</artifactId>
-                <version>${project.version}</version>
-                <classifier>features</classifier>
-                <type>xml</type>
-            </dependency>
         </dependencies>
     </dependencyManagement>
 </project>
diff --git a/atomix-storage/LICENSE b/atomix-storage/LICENSE
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/atomix-storage/pom.xml b/atomix-storage/pom.xml
new file mode 100644 (file)
index 0000000..886ad6a
--- /dev/null
@@ -0,0 +1,149 @@
+<!--
+  ~ Copyright 2017-2021 Open Networking Foundation
+  ~ Copyright 2023 PANTHEON.tech, s.r.o.
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>bundle-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <relativePath>../bundle-parent</relativePath>
+  </parent>
+
+  <artifactId>atomix-storage</artifactId>
+  <name>Atomix Storage</name>
+  <packaging>bundle</packaging>
+
+  <properties>
+    <odlparent.checkstyle.skip>true</odlparent.checkstyle.skip>
+    <odlparent.spotbugs.enforce>false</odlparent.spotbugs.enforce>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>kryo</artifactId>
+      <version>4.0.3</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>minlog</artifactId>
+      <version>1.3.1</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>reflectasm</artifactId>
+      <version>1.11.9</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.objenesis</groupId>
+      <artifactId>objenesis</artifactId>
+      <version>2.6</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava-testlib</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <!-- This project has a different license -->
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>unpack-license</id>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-license</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <copy file="LICENSE" tofile="${project.build.directory}/classes/LICENSE"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>check-license</id>
+            <goals>
+              <goal>check</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <Export-Package>
+              io.atomix.storage.journal
+            </Export-Package>
+            <Import-Package>
+              sun.nio.ch;resolution:=optional,
+              sun.misc;resolution:=optional,
+              !COM.newmonics.*,
+              !android.os,
+              *
+            </Import-Package>
+
+            <!-- Kryo is using ancient objenesis, so let's embed it to prevent duplicates -->
+            <Embed-Dependency>
+                *;inline=true;groupId=com.esotericsoftware,
+                *;inline=true;groupId=org.objenesis,
+            </Embed-Dependency>
+          </instructions>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java b/atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java
new file mode 100644 (file)
index 0000000..8244e57
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2019-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Objects;
+
+import static java.lang.invoke.MethodHandles.constant;
+import static java.lang.invoke.MethodHandles.dropArguments;
+import static java.lang.invoke.MethodHandles.filterReturnValue;
+import static java.lang.invoke.MethodHandles.guardWithTest;
+import static java.lang.invoke.MethodHandles.lookup;
+import static java.lang.invoke.MethodType.methodType;
+
+/**
+ * Utility class which allows explicit calls to the DirectByteBuffer cleaner method instead of relying on GC.
+ */
+public class BufferCleaner {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(BufferCleaner.class);
+
+  /**
+   * Reference to a Cleaner that does unmapping; no-op if not supported.
+   */
+  private static final Cleaner CLEANER;
+
+  static {
+    final Object hack = AccessController.doPrivileged((PrivilegedAction<Object>) BufferCleaner::unmapHackImpl);
+    if (hack instanceof Cleaner) {
+      CLEANER = (Cleaner) hack;
+      LOGGER.debug("java.nio.DirectByteBuffer.cleaner(): available");
+    } else {
+      CLEANER = (ByteBuffer buffer) -> {
+        // noop
+      };
+      LOGGER.debug("java.nio.DirectByteBuffer.cleaner(): unavailable: {}", hack);
+    }
+  }
+
+  private static Object unmapHackImpl() {
+    final MethodHandles.Lookup lookup = lookup();
+    try {
+      try {
+        // *** sun.misc.Unsafe unmapping (Java 9+) ***
+        final Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
+        // first check if Unsafe has the right method, otherwise we can give up
+        // without doing any security critical stuff:
+        final MethodHandle unmapper = lookup.findVirtual(unsafeClass, "invokeCleaner",
+            methodType(void.class, ByteBuffer.class));
+        // fetch the unsafe instance and bind it to the virtual MH:
+        final Field f = unsafeClass.getDeclaredField("theUnsafe");
+        f.setAccessible(true);
+        final Object theUnsafe = f.get(null);
+        return newBufferCleaner(ByteBuffer.class, unmapper.bindTo(theUnsafe));
+      } catch (SecurityException se) {
+        // rethrow to report errors correctly (we need to catch it here, as we also catch RuntimeException below!):
+        throw se;
+      } catch (ReflectiveOperationException | RuntimeException e) {
+        // *** sun.misc.Cleaner unmapping (Java 8) ***
+        final Class<?> directBufferClass = Class.forName("java.nio.DirectByteBuffer");
+
+        final Method m = directBufferClass.getMethod("cleaner");
+        m.setAccessible(true);
+        final MethodHandle directBufferCleanerMethod = lookup.unreflect(m);
+        final Class<?> cleanerClass = directBufferCleanerMethod.type().returnType();
+
+        /* "Compile" a MH that basically is equivalent to the following code:
+         * void unmapper(ByteBuffer byteBuffer) {
+         *   sun.misc.Cleaner cleaner = ((java.nio.DirectByteBuffer) byteBuffer).cleaner();
+         *   if (Objects.nonNull(cleaner)) {
+         *     cleaner.clean();
+         *   } else {
+         *     noop(cleaner); // the noop is needed because MethodHandles#guardWithTest always needs ELSE
+         *   }
+         * }
+         */
+        final MethodHandle cleanMethod = lookup.findVirtual(cleanerClass, "clean", methodType(void.class));
+        final MethodHandle nonNullTest = lookup.findStatic(Objects.class, "nonNull", methodType(boolean.class, Object.class))
+            .asType(methodType(boolean.class, cleanerClass));
+        final MethodHandle noop = dropArguments(constant(Void.class, null).asType(methodType(void.class)), 0, cleanerClass);
+        final MethodHandle unmapper = filterReturnValue(directBufferCleanerMethod, guardWithTest(nonNullTest, cleanMethod, noop))
+            .asType(methodType(void.class, ByteBuffer.class));
+        return newBufferCleaner(directBufferClass, unmapper);
+      }
+    } catch (SecurityException se) {
+      return "Unmapping is not supported, because not all required permissions are given to the Lucene JAR file: "
+          + se + " [Please grant at least the following permissions: RuntimePermission(\"accessClassInPackage.sun.misc\") "
+          + " and ReflectPermission(\"suppressAccessChecks\")]";
+    } catch (ReflectiveOperationException | RuntimeException e) {
+      return "Unmapping is not supported on this platform, because internal Java APIs are not compatible with this Atomix version: " + e;
+    }
+  }
+
+  private static Cleaner newBufferCleaner(final Class<?> unmappableBufferClass, final MethodHandle unmapper) {
+    return (ByteBuffer buffer) -> {
+      if (!buffer.isDirect()) {
+        return;
+      }
+      if (!unmappableBufferClass.isInstance(buffer)) {
+        throw new IllegalArgumentException("buffer is not an instance of " + unmappableBufferClass.getName());
+      }
+      final Throwable error = AccessController.doPrivileged((PrivilegedAction<Throwable>) () -> {
+        try {
+          unmapper.invokeExact(buffer);
+          return null;
+        } catch (Throwable t) {
+          return t;
+        }
+      });
+      if (error != null) {
+        throw new IOException("Unable to unmap the mapped buffer", error);
+      }
+    };
+  }
+
+  /**
+   * Free {@link ByteBuffer} if possible.
+   */
+  public static void freeBuffer(ByteBuffer buffer) throws IOException {
+    CLEANER.freeBuffer(buffer);
+  }
+}
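
A small usage sketch of the class above; the direct buffer and the surrounding class are illustrative, and the buffer must not be touched again after freeBuffer() returns:

    import io.atomix.storage.journal.BufferCleaner;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    // Illustrative only: release a direct buffer explicitly instead of waiting for GC.
    public final class BufferCleanerSketch {
        public static void main(final String[] args) throws IOException {
            final ByteBuffer direct = ByteBuffer.allocateDirect(4096);
            direct.putInt(42).flip();
            // ... use the buffer ...
            BufferCleaner.freeBuffer(direct);
            // 'direct' is now freed (where supported) and must no longer be accessed.
        }
    }
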
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java
new file mode 100644 (file)
index 0000000..d812680
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+@FunctionalInterface
+interface Cleaner {
+
+  /**
+   * Free {@link ByteBuffer} if possible.
+   */
+  void freeBuffer(ByteBuffer buffer) throws IOException;
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java
new file mode 100644 (file)
index 0000000..ac80fed
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * A {@link JournalReader} traversing only committed entries.
+ */
+final class CommitsSegmentJournalReader<E> extends SegmentedJournalReader<E> {
+    CommitsSegmentJournalReader(SegmentedJournal<E> journal, JournalSegment<E> segment) {
+        super(journal, segment);
+    }
+
+    @Override
+    public Indexed<E> tryNext() {
+        return getNextIndex() <= journal.getCommitIndex() ? super.tryNext() : null;
+    }
+}
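
A hedged sketch of how this reader contract is consumed, assuming tryNext() is exposed on the JournalReader interface (as the override above implies) and that Indexed is the entry wrapper from this package; the helper class is illustrative:

    package io.atomix.storage.journal;

    // Illustrative only: drain committed entries; tryNext() yields null once the reader
    // catches up to the journal's commit index, as implemented above.
    final class CommittedDrainSketch {
        private CommittedDrainSketch() {
            // utility class
        }

        static <E> int drainCommitted(final JournalReader<E> reader) {
            int count = 0;
            while (reader.tryNext() != null) {
                count++;
            }
            return count;
        }
    }
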
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java
new file mode 100644 (file)
index 0000000..311d16b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * A {@link StorageLevel#DISK} implementation of {@link FileReader}. Maintains an internal buffer.
+ */
+final class DiskFileReader extends FileReader {
+    /**
+     * Just do not bother with IO smaller than this many bytes.
+     */
+    private static final int MIN_IO_SIZE = 8192;
+
+    private final FileChannel channel;
+    private final ByteBuffer buffer;
+
+    // tracks where memory's first available byte maps to in terms of FileChannel.position()
+    private int bufferPosition;
+
+    DiskFileReader(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+        this(path, channel, allocateBuffer(maxSegmentSize, maxEntrySize));
+    }
+
+    // Note: take ownership of the buffer
+    DiskFileReader(final Path path, final FileChannel channel, final ByteBuffer buffer) {
+        super(path);
+        this.channel = requireNonNull(channel);
+        this.buffer = buffer.flip();
+        bufferPosition = 0;
+    }
+
+    static ByteBuffer allocateBuffer(final int maxSegmentSize, final int maxEntrySize) {
+        return ByteBuffer.allocate(chooseBufferSize(maxSegmentSize, maxEntrySize));
+    }
+
+    private static int chooseBufferSize(final int maxSegmentSize, final int maxEntrySize) {
+        if (maxSegmentSize <= MIN_IO_SIZE) {
+            // just buffer the entire segment
+            return maxSegmentSize;
+        }
+
+        // one full entry plus its header, or MIN_IO_SIZE, which benefits the read of many small entries
+        final int minBufferSize = maxEntrySize + SegmentEntry.HEADER_BYTES;
+        return minBufferSize <= MIN_IO_SIZE ? MIN_IO_SIZE : minBufferSize;
+    }
+
+    @Override
+    void invalidateCache() {
+        buffer.clear().flip();
+        bufferPosition = 0;
+    }
+
+    @Override
+    ByteBuffer read(final int position, final int size) {
+        // calculate logical seek distance between buffer's first byte and position and split flow between
+        // forward-moving and backwards-moving code paths.
+        final int seek = bufferPosition - position;
+        return seek >= 0 ? forwardAndRead(seek, position, size) : rewindAndRead(-seek, position, size);
+    }
+
+    private @NonNull ByteBuffer forwardAndRead(final int seek, final int position, final int size) {
+        final int missing = buffer.limit() - seek - size;
+        if (missing <= 0) {
+            // fast path: we have the requested region
+            return buffer.slice(seek, size).asReadOnlyBuffer();
+        }
+
+        // We need to read more data, but let's salvage what we can:
+        // - set buffer position to seek, which means it points to the same as position
+        // - run compact, which moves everything between position and limit onto the beginning of buffer and
+        //   sets it up to receive more bytes
+        // - start the read accounting for the seek
+        buffer.position(seek).compact();
+        readAtLeast(position + seek, missing);
+        return setAndSlice(position, size);
+    }
+
+    private @NonNull ByteBuffer rewindAndRead(final int rewindBy, final int position, final int size) {
+        // TODO: Lazy solution. To be super crisp, we want to find out how much of the buffer we can salvage and
+        //       do all the limit/position fiddling before and after read. Right now let's just flow the buffer up and
+        //       read it.
+        buffer.clear();
+        readAtLeast(position, size);
+        return setAndSlice(position, size);
+    }
+
+    private void readAtLeast(final int readPosition, final int readAtLeast) {
+        final int bytesRead;
+        try {
+            bytesRead = channel.read(buffer, readPosition);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+        verify(bytesRead >= readAtLeast, "Short read %s, expected %s", bytesRead, readAtLeast);
+        buffer.flip();
+    }
+
+    private @NonNull ByteBuffer setAndSlice(final int position, final int size) {
+        bufferPosition = position;
+        return buffer.slice(0, size).asReadOnlyBuffer();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..2663201
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+
+/**
+ * Segment writer.
+ * <p>
+ * The format of an entry in the log is as follows:
+ * <ul>
+ * <li>64-bit index</li>
+ * <li>8-bit boolean indicating whether a term change is contained in the entry</li>
+ * <li>64-bit optional term</li>
+ * <li>32-bit signed entry length, including the entry type ID</li>
+ * <li>8-bit signed entry type ID</li>
+ * <li>n-bit entry bytes</li>
+ * </ul>
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class DiskJournalSegmentWriter<E> extends JournalSegmentWriter<E> {
+    private static final ByteBuffer ZERO_ENTRY_HEADER = ByteBuffer.wrap(new byte[HEADER_BYTES]);
+
+    private final JournalSegmentReader<E> reader;
+    private final ByteBuffer buffer;
+
+    DiskJournalSegmentWriter(final FileChannel channel, final JournalSegment<E> segment, final int maxEntrySize,
+        final JournalIndex index, final JournalSerdes namespace) {
+        super(channel, segment, maxEntrySize, index, namespace);
+
+        buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize);
+        reader = new JournalSegmentReader<>(segment,
+            new DiskFileReader(segment.file().file().toPath(), channel, buffer), maxEntrySize, namespace);
+        reset(0);
+    }
+
+    DiskJournalSegmentWriter(final JournalSegmentWriter<E> previous) {
+        super(previous);
+
+        buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize);
+        reader = new JournalSegmentReader<>(segment,
+            new DiskFileReader(segment.file().file().toPath(), channel, buffer), maxEntrySize, namespace);
+    }
+
+    @Override
+    MappedByteBuffer buffer() {
+        return null;
+    }
+
+    @Override
+    MappedJournalSegmentWriter<E> toMapped() {
+        return new MappedJournalSegmentWriter<>(this);
+    }
+
+    @Override
+    DiskJournalSegmentWriter<E> toFileChannel() {
+        return this;
+    }
+
+    @Override
+    JournalSegmentReader<E> reader() {
+        return reader;
+    }
+
+    @Override
+    ByteBuffer startWrite(final int position, final int size) {
+        return buffer.clear().slice(0, size);
+    }
+
+    @Override
+    void commitWrite(final int position, final ByteBuffer entry) {
+        try {
+            channel.write(entry, position);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void writeEmptyHeader(final int position) {
+        try {
+            channel.write(ZERO_ENTRY_HEADER.asReadOnlyBuffer(), position);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void flush() {
+        try {
+            if (channel.isOpen()) {
+                channel.force(true);
+            }
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void close() {
+        flush();
+    }
+}
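
As an aside (illustration only, not part of the patch): commitWrite() and flush() above reduce to FileChannel's absolute write and force(). A minimal sketch, with a made-up file name and offset:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public final class PositionalWriteDemo {
        public static void main(String[] args) throws IOException {
            try (FileChannel channel = FileChannel.open(Path.of("demo.log"),
                    StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                ByteBuffer entry = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
                channel.write(entry, 64);    // absolute write: the channel's own position is untouched
                channel.force(true);         // durably flush data and metadata, as flush() does above
            }
        }
    }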
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java
new file mode 100644 (file)
index 0000000..fdc0597
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * An abstraction over how to read a {@link JournalSegmentFile}.
+ */
+abstract sealed class FileReader permits DiskFileReader, MappedFileReader {
+    private final Path path;
+
+    FileReader(final Path path) {
+        this.path = requireNonNull(path);
+    }
+
+    /**
+     * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+     */
+    abstract void invalidateCache();
+
+    /**
+     * Reads the specified number of bytes at the specified position. The sum of position and size is guaranteed not
+     * to exceed the maximum segment size nor the maximum entry size.
+     *
+     * @param position position of the entry header
+     * @param size number of bytes to read
+     * @return resulting buffer
+     */
+    abstract @NonNull ByteBuffer read(int position, int size);
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("path", path).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java
new file mode 100644 (file)
index 0000000..5bf7e6f
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * Indexed journal entry.
+ *
+ * @param <E> entry type
+ * @param index the entry index
+ * @param entry the indexed entry
+ * @param size the serialized entry size
+ */
+// FIXME: it seems 'index' has to be non-zero, we should enforce that if that really is the case
+// FIXME: it seems 'size' has to be non-zero, we should enforce that if that really is the case
+@NonNullByDefault
+public record Indexed<E>(long index, E entry, int size) {
+    public Indexed {
+        requireNonNull(entry);
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("index", index).add("entry", entry).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java
new file mode 100644 (file)
index 0000000..5e37c12
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.Closeable;
+
+/**
+ * Journal.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface Journal<E> extends Closeable {
+
+  /**
+   * Returns the journal writer.
+   *
+   * @return The journal writer.
+   */
+  JournalWriter<E> writer();
+
+  /**
+   * Opens a new journal reader.
+   *
+   * @param index The index at which to start the reader.
+   * @return A new journal reader.
+   */
+  JournalReader<E> openReader(long index);
+
+  /**
+   * Opens a new journal reader.
+   *
+   * @param index The index at which to start the reader.
+   * @param mode the reader mode
+   * @return A new journal reader.
+   */
+  JournalReader<E> openReader(long index, JournalReader.Mode mode);
+
+  /**
+   * Returns a boolean indicating whether the journal is open.
+   *
+   * @return Indicates whether the journal is open.
+   */
+  boolean isOpen();
+
+  @Override
+  void close();
+}
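
Illustration only (not part of the patch): a minimal round trip against the interfaces introduced here, assuming a Journal&lt;String&gt; instance is obtained elsewhere, for example from the SegmentedJournal builder added in this series:

    import io.atomix.storage.journal.Indexed;
    import io.atomix.storage.journal.Journal;
    import io.atomix.storage.journal.JournalReader;
    import io.atomix.storage.journal.JournalWriter;

    final class JournalRoundTrip {
        // 'journal' is assumed to be created and closed by the caller.
        static void roundTrip(Journal<String> journal) {
            JournalWriter<String> writer = journal.writer();
            Indexed<String> appended = writer.append("hello");
            writer.commit(appended.index());
            writer.flush();

            try (JournalReader<String> reader = journal.openReader(appended.index())) {
                Indexed<String> read = reader.tryNext();
                assert read != null && "hello".equals(read.entry());
            }
        }
    }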
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java
new file mode 100644 (file)
index 0000000..700f40d
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log reader.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface JournalReader<E> extends AutoCloseable {
+    /**
+     * Raft log reader mode.
+     */
+    enum Mode {
+        /**
+         * Reads all entries from the log.
+         */
+        ALL,
+        /**
+         * Reads committed entries from the log.
+         */
+        COMMITS,
+    }
+
+    /**
+     * Returns the first index in the journal.
+     *
+     * @return the first index in the journal
+     */
+    long getFirstIndex();
+
+    /**
+     * Returns the last read entry.
+     *
+     * @return The last read entry.
+     */
+    Indexed<E> getCurrentEntry();
+
+    /**
+     * Returns the next reader index.
+     *
+     * @return The next reader index.
+     */
+    long getNextIndex();
+
+    /**
+     * Try to move to the next entry.
+     *
+     * @return The next entry in the reader, or {@code null} if there is no next entry.
+     */
+    @Nullable Indexed<E> tryNext();
+
+    /**
+     * Resets the reader to the start.
+     */
+    void reset();
+
+    /**
+     * Resets the reader to the given index.
+     *
+     * @param index The index to which to reset the reader.
+     */
+    void reset(long index);
+
+    @Override
+    void close();
+}
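
A small sketch of the tryNext() contract (illustration only, not part of the patch): a null return means "no further entry right now", so draining a reader is a simple loop:

    import io.atomix.storage.journal.Indexed;
    import io.atomix.storage.journal.JournalReader;

    final class ReadAll {
        // Consumes all currently-readable entries and returns how many were seen.
        static <E> long drain(JournalReader<E> reader) {
            long count = 0;
            Indexed<E> entry;
            while ((entry = reader.tryNext()) != null) {
                count++;
            }
            return count;
        }
    }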
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java
new file mode 100644 (file)
index 0000000..9239f86
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.index.JournalIndex;
+import io.atomix.storage.journal.index.Position;
+import io.atomix.storage.journal.index.SparseJournalIndex;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log segment.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class JournalSegment<E> implements AutoCloseable {
+  private final JournalSegmentFile file;
+  private final JournalSegmentDescriptor descriptor;
+  private final StorageLevel storageLevel;
+  private final int maxEntrySize;
+  private final JournalIndex journalIndex;
+  private final JournalSerdes namespace;
+  private final Set<JournalSegmentReader<E>> readers = ConcurrentHashMap.newKeySet();
+  private final AtomicInteger references = new AtomicInteger();
+  private final FileChannel channel;
+
+  private JournalSegmentWriter<E> writer;
+  private boolean open = true;
+
+  JournalSegment(
+      JournalSegmentFile file,
+      JournalSegmentDescriptor descriptor,
+      StorageLevel storageLevel,
+      int maxEntrySize,
+      double indexDensity,
+      JournalSerdes namespace) {
+    this.file = file;
+    this.descriptor = descriptor;
+    this.storageLevel = storageLevel;
+    this.maxEntrySize = maxEntrySize;
+    this.namespace = namespace;
+    journalIndex = new SparseJournalIndex(indexDensity);
+    try {
+      channel = FileChannel.open(file.file().toPath(),
+        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+    writer = switch (storageLevel) {
+        case DISK -> new DiskJournalSegmentWriter<>(channel, this, maxEntrySize, journalIndex, namespace);
+        case MAPPED -> new MappedJournalSegmentWriter<>(channel, this, maxEntrySize, journalIndex, namespace)
+            .toFileChannel();
+    };
+  }
+
+  /**
+   * Returns the segment's starting index.
+   *
+   * @return The segment's starting index.
+   */
+  long firstIndex() {
+    return descriptor.index();
+  }
+
+  /**
+   * Returns the last index in the segment.
+   *
+   * @return The last index in the segment.
+   */
+  long lastIndex() {
+    return writer.getLastIndex();
+  }
+
+  /**
+   * Returns the size of the segment.
+   *
+   * @return the size of the segment
+   */
+  int size() {
+    try {
+      return (int) channel.size();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Returns the segment file.
+   *
+   * @return The segment file.
+   */
+  JournalSegmentFile file() {
+    return file;
+  }
+
+  /**
+   * Returns the segment descriptor.
+   *
+   * @return The segment descriptor.
+   */
+  JournalSegmentDescriptor descriptor() {
+    return descriptor;
+  }
+
+  /**
+   * Looks up the position of the given index.
+   *
+   * @param index the index to lookup
+   * @return the position of the given index or a lesser index, or {@code null}
+   */
+  @Nullable Position lookup(long index) {
+    return journalIndex.lookup(index);
+  }
+
+  /**
+   * Acquires a reference to the log segment.
+   */
+  private void acquire() {
+    if (references.getAndIncrement() == 0 && storageLevel == StorageLevel.MAPPED) {
+      writer = writer.toMapped();
+    }
+  }
+
+  /**
+   * Releases a reference to the log segment.
+   */
+  private void release() {
+    if (references.decrementAndGet() == 0) {
+      if (storageLevel == StorageLevel.MAPPED) {
+        writer = writer.toFileChannel();
+      }
+      if (!open) {
+        finishClose();
+      }
+    }
+  }
+
+  /**
+   * Acquires a reference to the segment writer.
+   *
+   * @return The segment writer.
+   */
+  JournalSegmentWriter<E> acquireWriter() {
+    checkOpen();
+    acquire();
+
+    return writer;
+  }
+
+  /**
+   * Releases the reference to the segment writer.
+   */
+  void releaseWriter() {
+      release();
+  }
+
+  /**
+   * Creates a new segment reader.
+   *
+   * @return A new segment reader.
+   */
+  JournalSegmentReader<E> createReader() {
+    checkOpen();
+    acquire();
+
+    final var buffer = writer.buffer();
+    final var path = file.file().toPath();
+    final var fileReader = buffer != null ? new MappedFileReader(path, buffer)
+        : new DiskFileReader(path, channel, descriptor.maxSegmentSize(), maxEntrySize);
+    final var reader = new JournalSegmentReader<>(this, fileReader, maxEntrySize, namespace);
+    reader.setPosition(JournalSegmentDescriptor.BYTES);
+    readers.add(reader);
+    return reader;
+  }
+
+  /**
+   * Closes a segment reader.
+   *
+   * @param reader the closed segment reader
+   */
+  void closeReader(JournalSegmentReader<E> reader) {
+    if (readers.remove(reader)) {
+      release();
+    }
+  }
+
+  /**
+   * Checks whether the segment is open.
+   */
+  private void checkOpen() {
+    if (!open) {
+      throw new IllegalStateException("Segment not open");
+    }
+  }
+
+  /**
+   * Returns a boolean indicating whether the segment is open.
+   *
+   * @return indicates whether the segment is open
+   */
+  public boolean isOpen() {
+    return open;
+  }
+
+  /**
+   * Closes the segment.
+   */
+  @Override
+  public void close() {
+    if (!open) {
+      return;
+    }
+
+    open = false;
+    readers.forEach(JournalSegmentReader::close);
+    if (references.get() == 0) {
+      finishClose();
+    }
+  }
+
+  private void finishClose() {
+    writer.close();
+    try {
+      channel.close();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Deletes the segment.
+   */
+  void delete() {
+    try {
+      Files.deleteIfExists(file.file().toPath());
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this)
+        .add("id", descriptor.id())
+        .add("version", descriptor.version())
+        .add("index", firstIndex())
+        .toString();
+  }
+}
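
The acquire()/release() pair above implements reference counting so that close() can be deferred while readers or the writer are still active. A simplified sketch of the same pattern (illustration only; it deliberately ignores the MAPPED/DISK writer switching and the finer concurrency concerns the real class deals with):

    import java.util.concurrent.atomic.AtomicInteger;

    // Toy resource following the same shape: close() only defers teardown while references remain.
    final class RefCountedResource implements AutoCloseable {
        private final AtomicInteger references = new AtomicInteger();
        private volatile boolean open = true;

        void acquire() {
            references.incrementAndGet();
        }

        void release() {
            if (references.decrementAndGet() == 0 && !open) {
                teardown();
            }
        }

        @Override
        public void close() {
            open = false;
            if (references.get() == 0) {
                teardown();
            }
        }

        private void teardown() {
            // release underlying resources here
        }
    }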
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java
new file mode 100644 (file)
index 0000000..757ca3a
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.ByteBuffer;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Stores information about a {@link JournalSegment} of the log.
+ * <p>
+ * The segment descriptor manages metadata related to a single segment of the log. Descriptors are stored within the
+ * first {@code 64} bytes of each segment in the following order:
+ * <ul>
+ * <li>{@code id} (64-bit signed integer) - A unique segment identifier. This is a monotonically increasing number within
+ * each log. Segments with in-sequence identifiers should contain in-sequence indexes.</li>
+ * <li>{@code index} (64-bit signed integer) - The effective first index of the segment. This indicates the index at which
+ * the first entry should be written to the segment. Indexes are monotonically increasing thereafter.</li>
+ * <li>{@code version} (64-bit signed integer) - The version of the segment. Versions are monotonically increasing
+ * starting at {@code 1}. Versions will only be incremented whenever the segment is rewritten to another memory/disk
+ * space, e.g. after log compaction.</li>
+ * <li>{@code maxSegmentSize} (32-bit unsigned integer) - The maximum number of bytes allowed in the segment.</li>
+ * <li>{@code maxEntries} (32-bit signed integer) - The total number of expected entries in the segment. This is the final
+ * number of entries allowed within the segment both before and after compaction. This entry count is used to determine
+ * the count of internal indexing and deduplication facilities.</li>
+ * <li>{@code updated} (64-bit signed integer) - The last update to the segment in terms of milliseconds since the epoch.
+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+ * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+ * change to {@code updated}.</li>
+ * <li>{@code locked} (8-bit boolean) - A boolean indicating whether the segment is locked. Segments will be locked once
+ * all entries have been committed to the segment. The lock state of each segment is used to determine log compaction
+ * and recovery behavior.</li>
+ * </ul>
+ * The remainder of the 64 segment header bytes are reserved for future metadata.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentDescriptor {
+  public static final int BYTES = 64;
+
+  // Current segment version.
+  @VisibleForTesting
+  static final int VERSION = 1;
+
+  // The lengths of each field in the header.
+  private static final int VERSION_LENGTH = Integer.BYTES;     // 32-bit signed integer
+  private static final int ID_LENGTH = Long.BYTES;             // 64-bit signed integer
+  private static final int INDEX_LENGTH = Long.BYTES;          // 64-bit signed integer
+  private static final int MAX_SIZE_LENGTH = Integer.BYTES;    // 32-bit signed integer
+  private static final int MAX_ENTRIES_LENGTH = Integer.BYTES; // 32-bit signed integer
+  private static final int UPDATED_LENGTH = Long.BYTES;        // 64-bit signed integer
+
+  // The positions of each field in the header.
+  private static final int VERSION_POSITION = 0;                                         // 0
+  private static final int ID_POSITION = VERSION_POSITION + VERSION_LENGTH;              // 4
+  private static final int INDEX_POSITION = ID_POSITION + ID_LENGTH;                     // 12
+  private static final int MAX_SIZE_POSITION = INDEX_POSITION + INDEX_LENGTH;            // 20
+  private static final int MAX_ENTRIES_POSITION = MAX_SIZE_POSITION + MAX_SIZE_LENGTH;   // 24
+  private static final int UPDATED_POSITION = MAX_ENTRIES_POSITION + MAX_ENTRIES_LENGTH; // 28
+
+  /**
+   * Returns a descriptor builder.
+   * <p>
+   * The descriptor builder will write segment metadata to a {@code 64} byte in-memory buffer.
+   *
+   * @return The descriptor builder.
+   */
+  public static Builder builder() {
+    return new Builder(ByteBuffer.allocate(BYTES));
+  }
+
+  /**
+   * Returns a descriptor builder for the given descriptor buffer.
+   *
+   * @param buffer The descriptor buffer.
+   * @return The descriptor builder.
+   * @throws NullPointerException if {@code buffer} is null
+   */
+  public static Builder builder(ByteBuffer buffer) {
+    return new Builder(buffer);
+  }
+
+  private final ByteBuffer buffer;
+  private final int version;
+  private final long id;
+  private final long index;
+  private final int maxSegmentSize;
+  private final int maxEntries;
+  private volatile long updated;
+  private volatile boolean locked;
+
+  /**
+   * @throws NullPointerException if {@code buffer} is null
+   */
+  public JournalSegmentDescriptor(ByteBuffer buffer) {
+    this.buffer = buffer;
+    this.version = buffer.getInt();
+    this.id = buffer.getLong();
+    this.index = buffer.getLong();
+    this.maxSegmentSize = buffer.getInt();
+    this.maxEntries = buffer.getInt();
+    this.updated = buffer.getLong();
+    this.locked = buffer.get() == 1;
+  }
+
+  /**
+   * Returns the segment version.
+   * <p>
+   * Versions are monotonically increasing starting at {@code 1}.
+   *
+   * @return The segment version.
+   */
+  public int version() {
+    return version;
+  }
+
+  /**
+   * Returns the segment identifier.
+   * <p>
+   * The segment ID is a monotonically increasing number within each log. Segments with in-sequence identifiers should
+   * contain in-sequence indexes.
+   *
+   * @return The segment identifier.
+   */
+  public long id() {
+    return id;
+  }
+
+  /**
+   * Returns the segment index.
+   * <p>
+   * The index indicates the index at which the first entry should be written to the segment. Indexes are monotonically
+   * increasing thereafter.
+   *
+   * @return The segment index.
+   */
+  public long index() {
+    return index;
+  }
+
+  /**
+   * Returns the maximum size of the segment in bytes.
+   *
+   * @return The maximum allowed size of the segment in bytes.
+   */
+  public int maxSegmentSize() {
+    return maxSegmentSize;
+  }
+
+  /**
+   * Returns the maximum number of entries allowed in the segment.
+   *
+   * @return The maximum number of entries allowed in the segment.
+   */
+  public int maxEntries() {
+    return maxEntries;
+  }
+
+  /**
+   * Returns last time the segment was updated.
+   * <p>
+   * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+   * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+   * change to {@code updated}.
+   *
+   * @return The last time the segment was updated in terms of milliseconds since the epoch.
+   */
+  public long updated() {
+    return updated;
+  }
+
+  /**
+   * Writes an update to the descriptor.
+   */
+  public void update(long timestamp) {
+    if (!locked) {
+      buffer.putLong(UPDATED_POSITION, timestamp);
+      this.updated = timestamp;
+    }
+  }
+
+  /**
+   * Copies the segment to a new buffer.
+   */
+  JournalSegmentDescriptor copyTo(ByteBuffer buffer) {
+    buffer.putInt(version);
+    buffer.putLong(id);
+    buffer.putLong(index);
+    buffer.putInt(maxSegmentSize);
+    buffer.putInt(maxEntries);
+    buffer.putLong(updated);
+    buffer.put(locked ? (byte) 1 : (byte) 0);
+    return this;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("version", version)
+        .add("id", id)
+        .add("index", index)
+        .add("updated", updated)
+        .toString();
+  }
+
+  /**
+   * Segment descriptor builder.
+   */
+  public static class Builder {
+    private final ByteBuffer buffer;
+
+    private Builder(ByteBuffer buffer) {
+      this.buffer = requireNonNull(buffer, "buffer cannot be null");
+      buffer.putInt(VERSION_POSITION, VERSION);
+    }
+
+    /**
+     * Sets the segment identifier.
+     *
+     * @param id The segment identifier.
+     * @return The segment descriptor builder.
+     */
+    public Builder withId(long id) {
+      buffer.putLong(ID_POSITION, id);
+      return this;
+    }
+
+    /**
+     * Sets the segment index.
+     *
+     * @param index The segment starting index.
+     * @return The segment descriptor builder.
+     */
+    public Builder withIndex(long index) {
+      buffer.putLong(INDEX_POSITION, index);
+      return this;
+    }
+
+    /**
+     * Sets the maximum size of the segment in bytes.
+     *
+     * @param maxSegmentSize The maximum size of the segment in bytes.
+     * @return The segment descriptor builder.
+     */
+    public Builder withMaxSegmentSize(int maxSegmentSize) {
+      buffer.putInt(MAX_SIZE_POSITION, maxSegmentSize);
+      return this;
+    }
+
+    /**
+     * Sets the maximum number of entries in the segment.
+     *
+     * @param maxEntries The maximum number of entries in the segment.
+     * @return The segment descriptor builder.
+     * @deprecated since 3.0.2
+     */
+    @Deprecated
+    public Builder withMaxEntries(int maxEntries) {
+      buffer.putInt(MAX_ENTRIES_POSITION, maxEntries);
+      return this;
+    }
+
+    /**
+     * Builds the segment descriptor.
+     *
+     * @return The built segment descriptor.
+     */
+    public JournalSegmentDescriptor build() {
+      buffer.rewind();
+      return new JournalSegmentDescriptor(buffer);
+    }
+  }
+}
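
A small usage sketch of the builder defined above (illustration only, not part of the patch); the id, index and size values are made up:

    import io.atomix.storage.journal.JournalSegmentDescriptor;

    final class DescriptorDemo {
        static JournalSegmentDescriptor firstSegmentDescriptor(int maxSegmentSize) {
            JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
                .withId(1)
                .withIndex(1)
                .withMaxSegmentSize(maxSegmentSize)
                .build();
            // build() rewinds the 64-byte buffer and re-reads it through the constructor
            assert descriptor.version() == 1 && descriptor.index() == 1;
            return descriptor;
        }
    }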
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java
new file mode 100644 (file)
index 0000000..2190dee
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Segment file utility.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentFile {
+  private static final char PART_SEPARATOR = '-';
+  private static final char EXTENSION_SEPARATOR = '.';
+  private static final String EXTENSION = "log";
+  private final File file;
+
+  /**
+   * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
+   *
+   * @throws NullPointerException if {@code file} is null
+   */
+  public static boolean isSegmentFile(String name, File file) {
+    return isSegmentFile(name, file.getName());
+  }
+
+  /**
+   * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
+   *
+   * @param journalName the name of the journal
+   * @param fileName the name of the file to check
+   * @throws NullPointerException if {@code file} is null
+   */
+  public static boolean isSegmentFile(String journalName, String fileName) {
+    requireNonNull(journalName, "journalName cannot be null");
+    requireNonNull(fileName, "fileName cannot be null");
+
+    int partSeparator = fileName.lastIndexOf(PART_SEPARATOR);
+    int extensionSeparator = fileName.lastIndexOf(EXTENSION_SEPARATOR);
+
+    if (extensionSeparator == -1
+        || partSeparator == -1
+        || extensionSeparator < partSeparator
+        || !fileName.endsWith(EXTENSION)) {
+      return false;
+    }
+
+    for (int i = partSeparator + 1; i < extensionSeparator; i++) {
+      if (!Character.isDigit(fileName.charAt(i))) {
+        return false;
+      }
+    }
+
+    return fileName.startsWith(journalName);
+  }
+
+  /**
+   * Creates a segment file for the given directory, log name, segment ID, and segment version.
+   */
+  static File createSegmentFile(String name, File directory, long id) {
+    return new File(directory, String.format("%s-%d.log", requireNonNull(name, "name cannot be null"), id));
+  }
+
+  /**
+   * @throws IllegalArgumentException if {@code file} is not a valid segment file
+   */
+  JournalSegmentFile(File file) {
+    this.file = file;
+  }
+
+  /**
+   * Returns the segment file.
+   *
+   * @return The segment file.
+   */
+  public File file() {
+    return file;
+  }
+}
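
Segment files follow a "&lt;journal&gt;-&lt;id&gt;.log" naming convention, which is what isSegmentFile() checks. A quick sketch (illustration only, not part of the patch) with made-up names:

    import io.atomix.storage.journal.JournalSegmentFile;
    import java.io.File;

    final class SegmentNaming {
        public static void main(String[] args) {
            System.out.println(JournalSegmentFile.isSegmentFile("raft", "raft-1.log"));            // true
            System.out.println(JournalSegmentFile.isSegmentFile("raft", "raft-1.snapshot"));       // false: wrong extension
            System.out.println(JournalSegmentFile.isSegmentFile("other", new File("raft-1.log"))); // false: wrong journal name
        }
    }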
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java
new file mode 100644 (file)
index 0000000..93ccd17
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.KryoException;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class JournalSegmentReader<E> {
+    private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentReader.class);
+
+    private final JournalSegment<E> segment;
+    private final JournalSerdes namespace;
+    private final FileReader fileReader;
+    private final int maxSegmentSize;
+    private final int maxEntrySize;
+
+    private int position;
+
+    JournalSegmentReader(final JournalSegment<E> segment, final FileReader fileReader,
+            final int maxEntrySize, final JournalSerdes namespace) {
+        this.segment = requireNonNull(segment);
+        this.fileReader = requireNonNull(fileReader);
+        maxSegmentSize = segment.descriptor().maxSegmentSize();
+        this.maxEntrySize = maxEntrySize;
+        this.namespace = requireNonNull(namespace);
+    }
+
+    /**
+     * Return the current position.
+     *
+     * @return current position.
+     */
+    int position() {
+        return position;
+    }
+
+    /**
+     * Set the file position.
+     *
+     * @param position new position
+     */
+    void setPosition(final int position) {
+        verify(position >= JournalSegmentDescriptor.BYTES && position < maxSegmentSize,
+            "Invalid position %s", position);
+        this.position = position;
+        fileReader.invalidateCache();
+    }
+
+    /**
+     * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+     */
+    void invalidateCache() {
+        fileReader.invalidateCache();
+    }
+
+    /**
+     * Reads the next entry, assigning it specified index.
+     *
+     * @param index entry index
+     * @return The entry, or {@code null}
+     */
+    @Nullable Indexed<E> readEntry(final long index) {
+        // Check whether the segment has enough room left for at least an entry header
+        final int remaining = maxSegmentSize - position - SegmentEntry.HEADER_BYTES;
+        if (remaining < 0) {
+            // Not enough space in the segment, there can never be another entry
+            return null;
+        }
+
+        // Calculate maximum entry length not exceeding file size nor maxEntrySize
+        final var maxLength = Math.min(remaining, maxEntrySize);
+        final var buffer = fileReader.read(position, maxLength + SegmentEntry.HEADER_BYTES);
+
+        // Read the entry length
+        final var length = buffer.getInt(0);
+        if (length < 1 || length > maxLength) {
+            // Invalid length, make sure next read re-tries
+            invalidateCache();
+            return null;
+        }
+
+        // Read the entry checksum
+        final int checksum = buffer.getInt(Integer.BYTES);
+
+        // Slice off the entry's bytes
+        final var entryBytes = buffer.slice(SegmentEntry.HEADER_BYTES, length);
+        // Compute the checksum for the entry bytes.
+        final var crc32 = new CRC32();
+        crc32.update(entryBytes);
+
+        // If the stored checksum does not equal the computed checksum, do not proceed further
+        final var computed = (int) crc32.getValue();
+        if (checksum != computed) {
+            LOG.warn("Expected checksum {}, computed {}", Integer.toHexString(checksum), Integer.toHexString(computed));
+            invalidateCache();
+            return null;
+        }
+
+        // Attempt to deserialize
+        final E entry;
+        try {
+            entry = namespace.deserialize(entryBytes.rewind());
+        } catch (KryoException e) {
+            // TODO: promote this to a hard error, as it should never happen
+            LOG.debug("Failed to deserialize entry", e);
+            invalidateCache();
+            return null;
+        }
+
+        // We are all set. Update the position.
+        position = position + SegmentEntry.HEADER_BYTES + length;
+        return new Indexed<>(index, entry, length);
+    }
+
+    /**
+     * Close this reader.
+     */
+    void close() {
+        segment.closeReader(this);
+    }
+}
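
readEntry() accepts an entry only if the stored CRC32 matches one computed over the body. A standalone sketch of that check (illustration only, not part of the patch), assuming the 8-byte header layout used here, a 4-byte length followed by a 4-byte checksum, and a well-formed length:

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32;

    final class EntryChecksum {
        // Mirrors the readEntry() validation: header is [length:int][crc32:int], followed by the body.
        static boolean isValid(ByteBuffer entry) {
            int length = entry.getInt(0);
            int storedChecksum = entry.getInt(Integer.BYTES);

            CRC32 crc32 = new CRC32();
            crc32.update(entry.slice(2 * Integer.BYTES, length));
            return storedChecksum == (int) crc32.getValue();
        }
    }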
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..c7c035b
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.KryoException;
+import io.atomix.storage.journal.index.JournalIndex;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+abstract sealed class JournalSegmentWriter<E> permits DiskJournalSegmentWriter, MappedJournalSegmentWriter {
+    private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentWriter.class);
+
+    final @NonNull FileChannel channel;
+    final @NonNull JournalSegment<E> segment;
+    private final @NonNull JournalIndex index;
+    final @NonNull JournalSerdes namespace;
+    final int maxSegmentSize;
+    final int maxEntrySize;
+
+    private Indexed<E> lastEntry;
+    private int currentPosition;
+
+    JournalSegmentWriter(final FileChannel channel, final JournalSegment<E> segment, final int maxEntrySize,
+            final JournalIndex index, final JournalSerdes namespace) {
+        this.channel = requireNonNull(channel);
+        this.segment = requireNonNull(segment);
+        this.index = requireNonNull(index);
+        this.namespace = requireNonNull(namespace);
+        maxSegmentSize = segment.descriptor().maxSegmentSize();
+        this.maxEntrySize = maxEntrySize;
+    }
+
+    JournalSegmentWriter(final JournalSegmentWriter<E> previous) {
+        channel = previous.channel;
+        segment = previous.segment;
+        index = previous.index;
+        namespace = previous.namespace;
+        maxSegmentSize = previous.maxSegmentSize;
+        maxEntrySize = previous.maxEntrySize;
+        lastEntry = previous.lastEntry;
+        currentPosition = previous.currentPosition;
+    }
+
+    /**
+     * Returns the last written index.
+     *
+     * @return The last written index.
+     */
+    final long getLastIndex() {
+        return lastEntry != null ? lastEntry.index() : segment.firstIndex() - 1;
+    }
+
+    /**
+     * Returns the last entry written.
+     *
+     * @return The last entry written.
+     */
+    final Indexed<E> getLastEntry() {
+        return lastEntry;
+    }
+
+    /**
+     * Returns the next index to be written.
+     *
+     * @return The next index to be written.
+     */
+    final long getNextIndex() {
+        return lastEntry != null ? lastEntry.index() + 1 : segment.firstIndex();
+    }
+
+    /**
+     * Tries to append an entry to the journal.
+     *
+     * @param entry The entry to append.
+     * @return The appended indexed entry, or {@code null} if there is not enough space available
+     */
+    final <T extends E> @Nullable Indexed<T> append(final T entry) {
+        // Store the entry index.
+        final long index = getNextIndex();
+        final int position = currentPosition;
+
+        // Serialize the entry.
+        final int bodyPosition = position + HEADER_BYTES;
+        final int avail = maxSegmentSize - bodyPosition;
+        if (avail < 0) {
+            LOG.trace("Not enough space for {} at {}", index, position);
+            return null;
+        }
+
+        final var writeLimit = Math.min(avail, maxEntrySize);
+        final var diskEntry = startWrite(position, writeLimit + HEADER_BYTES).position(HEADER_BYTES);
+        try {
+            namespace.serialize(entry, diskEntry);
+        } catch (KryoException e) {
+            if (writeLimit != maxEntrySize) {
+                // We have not provided enough capacity, signal to roll to next segment
+                LOG.trace("Tail serialization with {} bytes available failed", writeLimit, e);
+                return null;
+            }
+
+            // Just reset the buffer. There's no need to zero the bytes since we haven't written the length or checksum.
+            throw new StorageException.TooLarge("Entry size exceeds maximum allowed bytes (" + maxEntrySize + ")", e);
+        }
+
+        final int length = diskEntry.position() - HEADER_BYTES;
+
+        // Compute the checksum for the entry.
+        final var crc32 = new CRC32();
+        crc32.update(diskEntry.flip().position(HEADER_BYTES));
+
+        // Fill in the header (length and checksum) and commit the framed entry in a single write.
+        diskEntry.putInt(0, length).putInt(Integer.BYTES, (int) crc32.getValue());
+        commitWrite(position, diskEntry.rewind());
+
+        // Update the last entry with the correct index/term/length.
+        final var indexedEntry = new Indexed<E>(index, entry, length);
+        lastEntry = indexedEntry;
+        this.index.index(index, position);
+
+        currentPosition = bodyPosition + length;
+
+        @SuppressWarnings("unchecked")
+        final var ugly = (Indexed<T>) indexedEntry;
+        return ugly;
+    }
+
+    abstract ByteBuffer startWrite(int position, int size);
+
+    abstract void commitWrite(int position, ByteBuffer entry);
+
+    /**
+     * Resets the head of the segment to the given index.
+     *
+     * @param index the index to which to reset the head of the segment
+     */
+    final void reset(final long index) {
+        // acquire ownership of cache and make sure reader does not see anything we've done once we're done
+        final var reader = reader();
+        reader.invalidateCache();
+        try {
+            resetWithBuffer(reader, index);
+        } finally {
+            // Make sure reader does not see anything we've done
+            reader.invalidateCache();
+        }
+    }
+
+    abstract JournalSegmentReader<E> reader();
+
+    private void resetWithBuffer(final JournalSegmentReader<E> reader, final long index) {
+        long nextIndex = segment.firstIndex();
+
+        // Clear the buffer indexes and acquire ownership of the buffer
+        currentPosition = JournalSegmentDescriptor.BYTES;
+        reader.setPosition(JournalSegmentDescriptor.BYTES);
+
+        while (index == 0 || nextIndex <= index) {
+            final var entry = reader.readEntry(nextIndex);
+            if (entry == null) {
+                break;
+            }
+
+            lastEntry = entry;
+            this.index.index(nextIndex, currentPosition);
+            nextIndex++;
+
+            // Update the current position for indexing.
+            currentPosition = currentPosition + HEADER_BYTES + entry.size();
+        }
+    }
+
+    /**
+     * Truncates the log to the given index.
+     *
+     * @param index The index to which to truncate the log.
+     */
+    final void truncate(final long index) {
+        // If the index is greater than or equal to the last index, skip the truncate.
+        if (index >= getLastIndex()) {
+            return;
+        }
+
+        // Reset the last entry.
+        lastEntry = null;
+
+        // Truncate the index.
+        this.index.truncate(index);
+
+        if (index < segment.firstIndex()) {
+            // Reset the writer to the first entry.
+            currentPosition = JournalSegmentDescriptor.BYTES;
+        } else {
+            // Reset the writer to the given index.
+            reset(index);
+        }
+
+        // Zero the entry header at current channel position.
+        writeEmptyHeader(currentPosition);
+    }
+
+    /**
+     * Write {@link SegmentEntry#HEADER_BYTES} worth of zeroes at specified position.
+     *
+     * @param position position to write to
+     */
+    abstract void writeEmptyHeader(int position);
+
+    /**
+     * Flushes written entries to disk.
+     */
+    abstract void flush();
+
+    /**
+     * Closes this writer.
+     */
+    abstract void close();
+
+    /**
+     * Returns the mapped buffer underlying the segment writer, or {@code null} if the writer does not have such a
+     * buffer.
+     *
+     * @return the mapped buffer underlying the segment writer, or {@code null}.
+     */
+    abstract @Nullable MappedByteBuffer buffer();
+
+    abstract @NonNull MappedJournalSegmentWriter<E> toMapped();
+
+    abstract @NonNull DiskJournalSegmentWriter<E> toFileChannel();
+}
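
append() is the write-side mirror of the reader's checksum validation: the body is serialized after an 8-byte header, then the length and CRC32 are filled in. A standalone sketch of that framing, independent of the serializer (illustration only, not part of the patch):

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32;

    final class EntryFraming {
        // Produces [length:int][crc32:int][body], the layout append() commits to the segment.
        static ByteBuffer frame(byte[] body) {
            ByteBuffer entry = ByteBuffer.allocate(2 * Integer.BYTES + body.length);
            entry.position(2 * Integer.BYTES).put(body);

            CRC32 crc32 = new CRC32();
            crc32.update(entry.flip().position(2 * Integer.BYTES));

            return entry.putInt(0, body.length).putInt(Integer.BYTES, (int) crc32.getValue()).rewind();
        }
    }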
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java
new file mode 100644 (file)
index 0000000..32fc8d3
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import io.atomix.utils.serializer.KryoJournalSerdesBuilder;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * Support for serialization of {@link Journal} entries.
+ */
+public interface JournalSerdes {
+    /**
+     * Serializes given object to byte array.
+     *
+     * @param obj Object to serialize
+     * @return serialized bytes
+     */
+    byte[] serialize(Object obj);
+
+    /**
+     * Serializes given object to byte array.
+     *
+     * @param obj        Object to serialize
+     * @param bufferSize maximum size of serialized bytes
+     * @return serialized bytes
+     */
+    byte[] serialize(Object obj, int bufferSize);
+
+    /**
+     * Serializes given object to byte buffer.
+     *
+     * @param obj    Object to serialize
+     * @param buffer to write to
+     */
+    void serialize(Object obj, ByteBuffer buffer);
+
+    /**
+     * Serializes given object to OutputStream.
+     *
+     * @param obj    Object to serialize
+     * @param stream to write to
+     */
+    void serialize(Object obj, OutputStream stream);
+
+    /**
+     * Serializes given object to OutputStream.
+     *
+     * @param obj        Object to serialize
+     * @param stream     to write to
+     * @param bufferSize size of the buffer in front of the stream
+     */
+    void serialize(Object obj, OutputStream stream, int bufferSize);
+
+    /**
+     * Deserializes given byte array to Object.
+     *
+     * @param bytes serialized bytes
+     * @param <T>   deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(byte[] bytes);
+
+    /**
+     * Deserializes given byte buffer to Object.
+     *
+     * @param buffer input with serialized bytes
+     * @param <T>    deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(final ByteBuffer buffer);
+
+    /**
+     * Deserializes given InputStream to an Object.
+     *
+     * @param stream input stream
+     * @param <T>    deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(InputStream stream);
+
+    /**
+     * Deserializes given InputStream to an Object.
+     *
+     * @param stream     input stream
+     * @param <T>        deserialized Object type
+     * @param bufferSize size of the buffer in front of the stream
+     * @return deserialized Object
+     */
+    <T> T deserialize(final InputStream stream, final int bufferSize);
+
+    /**
+     * Creates a new {@link JournalSerdes} builder.
+     *
+     * @return builder
+     */
+    static Builder builder() {
+        return new KryoJournalSerdesBuilder();
+    }
+
+    /**
+     * Builder for {@link JournalSerdes}.
+     */
+    interface Builder {
+        /**
+         * Builds a {@link JournalSerdes} instance.
+         *
+         * @return A {@link JournalSerdes} implementation.
+         */
+        JournalSerdes build();
+
+        /**
+         * Builds a {@link JournalSerdes} instance.
+         *
+         * @param friendlyName friendly name for the namespace
+         * @return A {@link JournalSerdes} implementation.
+         */
+        JournalSerdes build(String friendlyName);
+
+        /**
+         * Registers serializer for the given set of classes.
+         * <p>
+         * When multiple classes are registered with an explicitly provided serializer, the namespace guarantees
+         * all instances will be serialized with the same type ID.
+         *
+         * @param classes list of classes to register
+         * @param serdes  serializer to use for the class
+         * @return this builder
+         */
+        Builder register(EntrySerdes<?> serdes, Class<?>... classes);
+
+        /**
+         * Sets the namespace class loader.
+         *
+         * @param classLoader the namespace class loader
+         * @return this builder
+         */
+        Builder setClassLoader(ClassLoader classLoader);
+    }
+
+    /**
+     * Input data stream exposed to {@link EntrySerdes#read(EntryInput)}.
+     */
+    @Beta
+    interface EntryInput {
+
+        byte[] readBytes(int length) throws IOException;
+
+        long readLong() throws IOException;
+
+        String readString() throws IOException;
+
+        Object readObject() throws IOException;
+
+        @VisibleForTesting
+        int readVarInt() throws IOException;
+    }
+
+    /**
+     * Output data stream exposed to {@link EntrySerdes#write(EntryOutput, Object)}.
+     */
+    @Beta
+    interface EntryOutput {
+
+        void writeBytes(byte[] bytes) throws IOException;
+
+        void writeLong(long value) throws IOException;
+
+        void writeObject(Object value) throws IOException;
+
+        void writeString(String value) throws IOException;
+
+        @VisibleForTesting
+        void writeVarInt(int value) throws IOException;
+    }
+
+    /**
+     * A serializer/deserializer for an entry.
+     *
+     * @param <T> Entry type
+     */
+    interface EntrySerdes<T> {
+
+        T read(EntryInput input) throws IOException;
+
+        void write(EntryOutput output, T entry) throws IOException;
+    }
+}
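
A sketch of how an EntrySerdes implementation plugs into the builder above (illustration only, not part of the patch); the Timestamped record and the friendly name are made up for the example:

    import io.atomix.storage.journal.JournalSerdes;
    import io.atomix.storage.journal.JournalSerdes.EntryInput;
    import io.atomix.storage.journal.JournalSerdes.EntryOutput;
    import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
    import java.io.IOException;

    final class TimestampedSerdes {
        // Hypothetical entry type used only for this sketch.
        record Timestamped(long timestamp, String payload) {
        }

        private static final EntrySerdes<Timestamped> SERDES = new EntrySerdes<>() {
            @Override
            public Timestamped read(EntryInput input) throws IOException {
                return new Timestamped(input.readLong(), input.readString());
            }

            @Override
            public void write(EntryOutput output, Timestamped entry) throws IOException {
                output.writeLong(entry.timestamp());
                output.writeString(entry.payload());
            }
        };

        static JournalSerdes create() {
            return JournalSerdes.builder()
                .register(SERDES, Timestamped.class)
                .build("timestamped");
        }
    }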
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java
new file mode 100644 (file)
index 0000000..1462463
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Log writer.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface JournalWriter<E> {
+    /**
+     * Returns the last written index.
+     *
+     * @return The last written index.
+     */
+    long getLastIndex();
+
+    /**
+     * Returns the last entry written.
+     *
+     * @return The last entry written.
+     */
+    Indexed<E> getLastEntry();
+
+    /**
+     * Returns the next index to be written.
+     *
+     * @return The next index to be written.
+     */
+    long getNextIndex();
+
+    /**
+     * Appends an entry to the journal.
+     *
+     * @param entry The entry to append.
+     * @return The appended indexed entry.
+     */
+    <T extends E> @NonNull Indexed<T> append(T entry);
+
+    /**
+     * Commits entries up to the given index.
+     *
+     * @param index The index up to which to commit entries.
+     */
+    void commit(long index);
+
+    /**
+     * Resets the head of the journal to the given index.
+     *
+     * @param index the index to which to reset the head of the journal
+     */
+    void reset(long index);
+
+    /**
+     * Truncates the log to the given index.
+     *
+     * @param index The index to which to truncate the log.
+     */
+    void truncate(long index);
+
+    /**
+     * Flushes written entries to disk.
+     */
+    void flush();
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java
new file mode 100644 (file)
index 0000000..204fd72
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+/**
+ * A {@link StorageLevel#MAPPED} implementation of {@link FileReader}. Operates on direct mapping of the entire file.
+ */
+final class MappedFileReader extends FileReader {
+    private final ByteBuffer buffer;
+
+    MappedFileReader(final Path path, final ByteBuffer buffer) {
+        super(path);
+        this.buffer = buffer.slice().asReadOnlyBuffer();
+    }
+
+    @Override
+    void invalidateCache() {
+        // No-op: the mapping is guaranteed to be coherent
+    }
+
+    @Override
+    ByteBuffer read(final int position, final int size) {
+        return buffer.slice(position, size);
+    }
+}
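
MappedFileReader relies on the coherence of a memory mapping, which is why its invalidateCache() is a no-op. A minimal sketch of producing such a mapping (read-only here, whereas the mapped writer maps READ_WRITE); illustration only, not part of the patch:

    import java.io.IOException;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.channels.FileChannel.MapMode;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    final class MappedReadDemo {
        // Maps the first 'size' bytes of a file; slicing the mapping is all MappedFileReader.read() does.
        static MappedByteBuffer mapReadOnly(Path file, int size) throws IOException {
            try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
                // the mapping stays valid after the channel is closed
                return channel.map(MapMode.READ_ONLY, 0, size);
            }
        }
    }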
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..00dd4c6
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Segment writer.
+ * <p>
+ * The format of an entry in the log is as follows:
+ * <ul>
+ * <li>32-bit signed entry length</li>
+ * <li>32-bit unsigned CRC32 checksum of the entry bytes</li>
+ * <li>serialized entry bytes</li>
+ * </ul>
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class MappedJournalSegmentWriter<E> extends JournalSegmentWriter<E> {
+    private final @NonNull MappedByteBuffer mappedBuffer;
+    private final JournalSegmentReader<E> reader;
+    private final ByteBuffer buffer;
+
+    MappedJournalSegmentWriter(final FileChannel channel, final JournalSegment<E> segment, final int maxEntrySize,
+        final JournalIndex index, final JournalSerdes namespace) {
+        super(channel, segment, maxEntrySize, index, namespace);
+
+        mappedBuffer = mapBuffer(channel, maxSegmentSize);
+        buffer = mappedBuffer.slice();
+        reader = new JournalSegmentReader<>(segment, new MappedFileReader(segment.file().file().toPath(), mappedBuffer),
+            maxEntrySize, namespace);
+        reset(0);
+    }
+
+    MappedJournalSegmentWriter(final JournalSegmentWriter<E> previous) {
+        super(previous);
+
+        mappedBuffer = mapBuffer(channel, maxSegmentSize);
+        buffer = mappedBuffer.slice();
+        reader = new JournalSegmentReader<>(segment, new MappedFileReader(segment.file().file().toPath(), mappedBuffer),
+            maxEntrySize, namespace);
+    }
+
+    private static @NonNull MappedByteBuffer mapBuffer(final FileChannel channel, final int maxSegmentSize) {
+        try {
+            return channel.map(FileChannel.MapMode.READ_WRITE, 0, maxSegmentSize);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    @NonNull MappedByteBuffer buffer() {
+        return mappedBuffer;
+    }
+
+    @Override
+    MappedJournalSegmentWriter<E> toMapped() {
+        return this;
+    }
+
+    @Override
+    DiskJournalSegmentWriter<E> toFileChannel() {
+        close();
+        return new DiskJournalSegmentWriter<>(this);
+    }
+
+    @Override
+    JournalSegmentReader<E> reader() {
+        return reader;
+    }
+
+    @Override
+    ByteBuffer startWrite(final int position, final int size) {
+        return buffer.slice(position, size);
+    }
+
+    @Override
+    void commitWrite(final int position, final ByteBuffer entry) {
+        // No-op, buffer is write-through
+    }
+
+    @Override
+    void writeEmptyHeader(final int position) {
+        // Note: we issue a single putLong() instead of two putInt()s.
+        buffer.putLong(position, 0L);
+    }
+
+    @Override
+    void flush() {
+        mappedBuffer.force();
+    }
+
+    @Override
+    void close() {
+        flush();
+        try {
+            BufferCleaner.freeBuffer(mappedBuffer);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+}
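
The write-through behaviour this class relies on comes directly from MappedByteBuffer; the standalone JDK sketch below reproduces the same pattern. The path and size are illustrative and do not refer to anything in this patch.

    // Standalone sketch of the mapped write-through pattern; illustrative only.
    static void mappedWriteDemo(final java.nio.file.Path path, final int size) throws java.io.IOException {
        try (var channel = java.nio.channels.FileChannel.open(path,
                java.nio.file.StandardOpenOption.CREATE,
                java.nio.file.StandardOpenOption.READ,
                java.nio.file.StandardOpenOption.WRITE)) {
            final var mapped = channel.map(java.nio.channels.FileChannel.MapMode.READ_WRITE, 0, size);
            mapped.putLong(0, 0L); // same trick as writeEmptyHeader(): a single putLong() instead of two putInt()s
            mapped.force();        // the flush() equivalent: push dirty pages to the backing file
        }
    }
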
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java
new file mode 100644 (file)
index 0000000..be6c6ba
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link Indexed} entry read from {@link JournalSegment}.
+ *
+ * @param checksum The CRC32 checksum of data
+ * @param bytes Entry bytes
+ */
+record SegmentEntry(int checksum, ByteBuffer bytes) {
+    /**
+     * The size of the header, comprising:
+     * <ul>
+     *   <li>32-bit signed entry length</li>
+     *   <li>32-bit unsigned CRC32 checksum</li>
+     * </ul>
+     */
+    static final int HEADER_BYTES = Integer.BYTES + Integer.BYTES;
+
+    SegmentEntry {
+        if (bytes.remaining() < 1) {
+            throw new IllegalArgumentException("Invalid entry bytes " + bytes);
+        }
+    }
+}
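
A hedged sketch of decoding the header documented above; the segment readers in this patch do this internally, so the helper below is purely illustrative and would need to live in the same package because SegmentEntry is package-private.

    // Illustrative decoder for the [length][CRC32][bytes] layout described above.
    static SegmentEntry decode(final java.nio.ByteBuffer segment, final int position) {
        final int length = segment.getInt(position);                   // 32-bit signed entry length
        final int checksum = segment.getInt(position + Integer.BYTES); // 32-bit unsigned CRC32 of the entry bytes
        final var bytes = segment.slice(position + SegmentEntry.HEADER_BYTES, length);

        final var crc32 = new java.util.zip.CRC32();
        crc32.update(bytes.duplicate());
        if ((int) crc32.getValue() != checksum) {
            throw new StorageException("Corrupted entry at position " + position);
        }
        return new SegmentEntry(checksum, bytes);
    }
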
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java
new file mode 100644 (file)
index 0000000..ef1a4cf
--- /dev/null
@@ -0,0 +1,868 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.StandardOpenOption;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Segmented journal.
+ */
+public final class SegmentedJournal<E> implements Journal<E> {
+  /**
+   * Returns a new segmented journal builder.
+   *
+   * @return A new segmented journal builder.
+   */
+  public static <E> Builder<E> builder() {
+    return new Builder<>();
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournal.class);
+  private static final int SEGMENT_BUFFER_FACTOR = 3;
+
+  private final String name;
+  private final StorageLevel storageLevel;
+  private final File directory;
+  private final JournalSerdes namespace;
+  private final int maxSegmentSize;
+  private final int maxEntrySize;
+  private final int maxEntriesPerSegment;
+  private final double indexDensity;
+  private final boolean flushOnCommit;
+  private final SegmentedJournalWriter<E> writer;
+  private volatile long commitIndex;
+
+  private final ConcurrentNavigableMap<Long, JournalSegment<E>> segments = new ConcurrentSkipListMap<>();
+  private final Collection<SegmentedJournalReader<E>> readers = ConcurrentHashMap.newKeySet();
+  private JournalSegment<E> currentSegment;
+
+  private volatile boolean open = true;
+
+  public SegmentedJournal(
+      String name,
+      StorageLevel storageLevel,
+      File directory,
+      JournalSerdes namespace,
+      int maxSegmentSize,
+      int maxEntrySize,
+      int maxEntriesPerSegment,
+      double indexDensity,
+      boolean flushOnCommit) {
+    this.name = requireNonNull(name, "name cannot be null");
+    this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+    this.directory = requireNonNull(directory, "directory cannot be null");
+    this.namespace = requireNonNull(namespace, "namespace cannot be null");
+    this.maxSegmentSize = maxSegmentSize;
+    this.maxEntrySize = maxEntrySize;
+    this.maxEntriesPerSegment = maxEntriesPerSegment;
+    this.indexDensity = indexDensity;
+    this.flushOnCommit = flushOnCommit;
+    open();
+    this.writer = new SegmentedJournalWriter<>(this);
+  }
+
+  /**
+   * Returns the segment file name prefix.
+   *
+   * @return The segment file name prefix.
+   */
+  public String name() {
+    return name;
+  }
+
+  /**
+   * Returns the storage directory.
+   * <p>
+   * The storage directory is the directory to which all segments write files. Segment files for multiple logs may be
+   * stored in the storage directory, and files for each log instance will be identified by the {@code prefix} provided
+   * when the log is opened.
+   *
+   * @return The storage directory.
+   */
+  public File directory() {
+    return directory;
+  }
+
+  /**
+   * Returns the storage level.
+   * <p>
+   * The storage level dictates how entries within individual journal segments should be stored.
+   *
+   * @return The storage level.
+   */
+  public StorageLevel storageLevel() {
+    return storageLevel;
+  }
+
+  /**
+   * Returns the maximum journal segment size.
+   * <p>
+   * The maximum segment size dictates the maximum size any segment in the journal may consume in bytes.
+   *
+   * @return The maximum segment size in bytes.
+   */
+  public int maxSegmentSize() {
+    return maxSegmentSize;
+  }
+
+  /**
+   * Returns the maximum journal entry size.
+   * <p>
+   * The maximum entry size dictates the maximum size any entry in the segment may consume in bytes.
+   *
+   * @return the maximum entry size in bytes
+   */
+  public int maxEntrySize() {
+    return maxEntrySize;
+  }
+
+  /**
+   * Returns the maximum number of entries per segment.
+   * <p>
+   * The maximum entries per segment dictates the maximum number of entries that are allowed to be stored in any segment
+   * in a journal.
+   *
+   * @return The maximum number of entries per segment.
+   * @deprecated since 3.0.2
+   */
+  @Deprecated
+  public int maxEntriesPerSegment() {
+    return maxEntriesPerSegment;
+  }
+
+  /**
+   * Returns the collection of journal segments.
+   *
+   * @return the collection of journal segments
+   */
+  public Collection<JournalSegment<E>> segments() {
+    return segments.values();
+  }
+
+  /**
+   * Returns the collection of journal segments with indexes greater than or equal to the given index.
+   *
+   * @param index the starting index
+   * @return the journal segments starting with indexes greater than or equal to the given index
+   */
+  public Collection<JournalSegment<E>> segments(long index) {
+    return segments.tailMap(index).values();
+  }
+
+  /**
+   * Returns the total size of the journal.
+   *
+   * @return the total size of the journal
+   */
+  public long size() {
+    return segments.values().stream()
+        .mapToLong(segment -> segment.size())
+        .sum();
+  }
+
+  @Override
+  public JournalWriter<E> writer() {
+    return writer;
+  }
+
+  @Override
+  public JournalReader<E> openReader(long index) {
+    return openReader(index, JournalReader.Mode.ALL);
+  }
+
+  /**
+   * Opens a new journal reader with the given reader mode.
+   *
+   * @param index The index from which to begin reading entries.
+   * @param mode The mode in which to read entries.
+   * @return The journal reader.
+   */
+  public JournalReader<E> openReader(long index, JournalReader.Mode mode) {
+    final var segment = getSegment(index);
+    final var reader = switch (mode) {
+      case ALL -> new SegmentedJournalReader<>(this, segment);
+      case COMMITS -> new CommitsSegmentJournalReader<>(this, segment);
+    };
+
+    // Forward reader to specified index
+    long next = reader.getNextIndex();
+    while (index > next && reader.tryNext() != null) {
+        next = reader.getNextIndex();
+    }
+
+    readers.add(reader);
+    return reader;
+  }
+
+  /**
+   * Opens the segments.
+   */
+  private synchronized void open() {
+    // Load existing log segments from disk.
+    for (JournalSegment<E> segment : loadSegments()) {
+      segments.put(segment.descriptor().index(), segment);
+    }
+
+    // If a segment doesn't already exist, create an initial segment starting at index 1.
+    if (!segments.isEmpty()) {
+      currentSegment = segments.lastEntry().getValue();
+    } else {
+      JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+          .withId(1)
+          .withIndex(1)
+          .withMaxSegmentSize(maxSegmentSize)
+          .withMaxEntries(maxEntriesPerSegment)
+          .build();
+
+      currentSegment = createSegment(descriptor);
+      currentSegment.descriptor().update(System.currentTimeMillis());
+
+      segments.put(1L, currentSegment);
+    }
+  }
+
+  /**
+   * Asserts that the manager is open.
+   *
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  private void assertOpen() {
+    checkState(currentSegment != null, "journal not open");
+  }
+
+  /**
+   * Asserts that enough disk space is available to allocate a new segment.
+   */
+  private void assertDiskSpace() {
+    if (directory().getUsableSpace() < maxSegmentSize() * SEGMENT_BUFFER_FACTOR) {
+      throw new StorageException.OutOfDiskSpace("Not enough space to allocate a new journal segment");
+    }
+  }
+
+  /**
+   * Resets the current segment, creating a new segment if necessary.
+   */
+  private synchronized void resetCurrentSegment() {
+    JournalSegment<E> lastSegment = getLastSegment();
+    if (lastSegment != null) {
+      currentSegment = lastSegment;
+    } else {
+      JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+          .withId(1)
+          .withIndex(1)
+          .withMaxSegmentSize(maxSegmentSize)
+          .withMaxEntries(maxEntriesPerSegment)
+          .build();
+
+      currentSegment = createSegment(descriptor);
+
+      segments.put(1L, currentSegment);
+    }
+  }
+
+  /**
+   * Resets and returns the first segment in the journal.
+   *
+   * @param index the starting index of the journal
+   * @return the first segment
+   */
+  JournalSegment<E> resetSegments(long index) {
+    assertOpen();
+
+    // If the index already equals the first segment index, skip the reset.
+    JournalSegment<E> firstSegment = getFirstSegment();
+    if (index == firstSegment.firstIndex()) {
+      return firstSegment;
+    }
+
+    for (JournalSegment<E> segment : segments.values()) {
+      segment.close();
+      segment.delete();
+    }
+    segments.clear();
+
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(1)
+        .withIndex(index)
+        .withMaxSegmentSize(maxSegmentSize)
+        .withMaxEntries(maxEntriesPerSegment)
+        .build();
+    currentSegment = createSegment(descriptor);
+    segments.put(index, currentSegment);
+    return currentSegment;
+  }
+
+  /**
+   * Returns the first segment in the log.
+   *
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  JournalSegment<E> getFirstSegment() {
+    assertOpen();
+    Map.Entry<Long, JournalSegment<E>> segment = segments.firstEntry();
+    return segment != null ? segment.getValue() : null;
+  }
+
+  /**
+   * Returns the last segment in the log.
+   *
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  JournalSegment<E> getLastSegment() {
+    assertOpen();
+    Map.Entry<Long, JournalSegment<E>> segment = segments.lastEntry();
+    return segment != null ? segment.getValue() : null;
+  }
+
+  /**
+   * Creates and returns the next segment.
+   *
+   * @return The next segment.
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  synchronized JournalSegment<E> getNextSegment() {
+    assertOpen();
+    assertDiskSpace();
+
+    JournalSegment<E> lastSegment = getLastSegment();
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(lastSegment != null ? lastSegment.descriptor().id() + 1 : 1)
+        .withIndex(currentSegment.lastIndex() + 1)
+        .withMaxSegmentSize(maxSegmentSize)
+        .withMaxEntries(maxEntriesPerSegment)
+        .build();
+
+    currentSegment = createSegment(descriptor);
+
+    segments.put(descriptor.index(), currentSegment);
+    return currentSegment;
+  }
+
+  /**
+   * Returns the segment following the segment containing the given index.
+   *
+   * @param index The segment index with which to look up the next segment.
+   * @return The next segment for the given index.
+   */
+  JournalSegment<E> getNextSegment(long index) {
+    Map.Entry<Long, JournalSegment<E>> nextSegment = segments.higherEntry(index);
+    return nextSegment != null ? nextSegment.getValue() : null;
+  }
+
+  /**
+   * Returns the segment for the given index.
+   *
+   * @param index The index for which to return the segment.
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  synchronized JournalSegment<E> getSegment(long index) {
+    assertOpen();
+    // Check if the current segment contains the given index first in order to prevent an unnecessary map lookup.
+    if (currentSegment != null && index > currentSegment.firstIndex()) {
+      return currentSegment;
+    }
+
+    // If the index is in another segment, get the entry with the next lowest first index.
+    Map.Entry<Long, JournalSegment<E>> segment = segments.floorEntry(index);
+    if (segment != null) {
+      return segment.getValue();
+    }
+    return getFirstSegment();
+  }
+
+  /**
+   * Removes a segment.
+   *
+   * @param segment The segment to remove.
+   */
+  synchronized void removeSegment(JournalSegment<E> segment) {
+    segments.remove(segment.firstIndex());
+    segment.close();
+    segment.delete();
+    resetCurrentSegment();
+  }
+
+  /**
+   * Creates a new segment.
+   */
+  JournalSegment<E> createSegment(JournalSegmentDescriptor descriptor) {
+    File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, descriptor.id());
+
+    RandomAccessFile raf;
+    FileChannel channel;
+    try {
+      raf = new RandomAccessFile(segmentFile, "rw");
+      raf.setLength(descriptor.maxSegmentSize());
+      channel =  raf.getChannel();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+
+    ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+    descriptor.copyTo(buffer);
+    buffer.flip();
+    try {
+      channel.write(buffer);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    } finally {
+      try {
+        channel.close();
+        raf.close();
+      } catch (IOException e) {
+      }
+    }
+    JournalSegment<E> segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+    LOG.debug("Created segment: {}", segment);
+    return segment;
+  }
+
+  /**
+   * Creates a new segment instance.
+   *
+   * @param segmentFile The segment file.
+   * @param descriptor The segment descriptor.
+   * @return The segment instance.
+   */
+  protected JournalSegment<E> newSegment(JournalSegmentFile segmentFile, JournalSegmentDescriptor descriptor) {
+    return new JournalSegment<>(segmentFile, descriptor, storageLevel, maxEntrySize, indexDensity, namespace);
+  }
+
+  /**
+   * Loads a segment.
+   */
+  private JournalSegment<E> loadSegment(long segmentId) {
+    File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, segmentId);
+    ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+    try (FileChannel channel = openChannel(segmentFile)) {
+      channel.read(buffer);
+      buffer.flip();
+      JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+      JournalSegment<E> segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+      LOG.debug("Loaded disk segment: {} ({})", descriptor.id(), segmentFile.getName());
+      return segment;
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  private FileChannel openChannel(File file) {
+    try {
+      return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Loads all segments from disk.
+   *
+   * @return A collection of segments for the log.
+   */
+  protected Collection<JournalSegment<E>> loadSegments() {
+    // Ensure log directories are created.
+    directory.mkdirs();
+
+    TreeMap<Long, JournalSegment<E>> segments = new TreeMap<>();
+
+    // Iterate through all files in the log directory.
+    for (File file : directory.listFiles(File::isFile)) {
+
+      // If the file looks like a segment file, attempt to load the segment.
+      if (JournalSegmentFile.isSegmentFile(name, file)) {
+        JournalSegmentFile segmentFile = new JournalSegmentFile(file);
+        ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+        try (FileChannel channel = openChannel(file)) {
+          channel.read(buffer);
+          buffer.flip();
+        } catch (IOException e) {
+          throw new StorageException(e);
+        }
+
+        JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+
+        // Load the segment.
+        JournalSegment<E> segment = loadSegment(descriptor.id());
+
+        // Add the segment to the segments list.
+        LOG.debug("Found segment: {} ({})", segment.descriptor().id(), segmentFile.file().getName());
+        segments.put(segment.firstIndex(), segment);
+      }
+    }
+
+    // Verify that all the segments in the log align with one another.
+    JournalSegment<E> previousSegment = null;
+    boolean corrupted = false;
+    Iterator<Map.Entry<Long, JournalSegment<E>>> iterator = segments.entrySet().iterator();
+    while (iterator.hasNext()) {
+      JournalSegment<E> segment = iterator.next().getValue();
+      if (previousSegment != null && previousSegment.lastIndex() != segment.firstIndex() - 1) {
+        LOG.warn("Journal is inconsistent. {} is not aligned with prior segment {}", segment.file().file(), previousSegment.file().file());
+        corrupted = true;
+      }
+      if (corrupted) {
+        segment.close();
+        segment.delete();
+        iterator.remove();
+      }
+      previousSegment = segment;
+    }
+
+    return segments.values();
+  }
+
+  /**
+   * Resets journal readers to the given head.
+   *
+   * @param index The index at which to reset readers.
+   */
+  void resetHead(long index) {
+    for (SegmentedJournalReader<E> reader : readers) {
+      if (reader.getNextIndex() < index) {
+        reader.reset(index);
+      }
+    }
+  }
+
+  /**
+   * Resets journal readers to the given tail.
+   *
+   * @param index The index at which to reset readers.
+   */
+  void resetTail(long index) {
+    for (SegmentedJournalReader<E> reader : readers) {
+      if (reader.getNextIndex() >= index) {
+        reader.reset(index);
+      }
+    }
+  }
+
+  void closeReader(SegmentedJournalReader<E> reader) {
+    readers.remove(reader);
+  }
+
+  @Override
+  public boolean isOpen() {
+    return open;
+  }
+
+  /**
+   * Returns a boolean indicating whether a segment can be removed from the journal prior to the given index.
+   *
+   * @param index the index from which to remove segments
+   * @return indicates whether a segment can be removed from the journal
+   */
+  public boolean isCompactable(long index) {
+    Map.Entry<Long, JournalSegment<E>> segmentEntry = segments.floorEntry(index);
+    return segmentEntry != null && segments.headMap(segmentEntry.getValue().firstIndex()).size() > 0;
+  }
+
+  /**
+   * Returns the starting index of the segment containing the given compaction index.
+   *
+   * @param index the compaction index
+   * @return the first index of the segment containing {@code index}, or zero if no such segment exists
+   */
+  public long getCompactableIndex(long index) {
+    Map.Entry<Long, JournalSegment<E>> segmentEntry = segments.floorEntry(index);
+    return segmentEntry != null ? segmentEntry.getValue().firstIndex() : 0;
+  }
+
+  /**
+   * Compacts the journal up to the given index.
+   * <p>
+   * The semantics of compaction are not specified by this interface.
+   *
+   * @param index The index up to which to compact the journal.
+   */
+  public void compact(long index) {
+    final var segmentEntry = segments.floorEntry(index);
+    if (segmentEntry != null) {
+      final var compactSegments = segments.headMap(segmentEntry.getValue().firstIndex());
+      if (!compactSegments.isEmpty()) {
+        LOG.debug("{} - Compacting {} segment(s)", name, compactSegments.size());
+        for (JournalSegment<E> segment : compactSegments.values()) {
+          LOG.trace("Deleting segment: {}", segment);
+          segment.close();
+          segment.delete();
+        }
+        compactSegments.clear();
+        resetHead(segmentEntry.getValue().firstIndex());
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+    segments.values().forEach(segment -> {
+      LOG.debug("Closing segment: {}", segment);
+      segment.close();
+    });
+    currentSegment = null;
+    open = false;
+  }
+
+  /**
+   * Returns whether {@code flushOnCommit} is enabled for the log.
+   *
+   * @return Indicates whether {@code flushOnCommit} is enabled for the log.
+   */
+  boolean isFlushOnCommit() {
+    return flushOnCommit;
+  }
+
+  /**
+   * Commits entries up to the given index.
+   *
+   * @param index The index up to which to commit entries.
+   */
+  void setCommitIndex(long index) {
+    this.commitIndex = index;
+  }
+
+  /**
+   * Returns the journal commit index.
+   *
+   * @return The journal commit index.
+   */
+  long getCommitIndex() {
+    return commitIndex;
+  }
+
+  /**
+   * Segmented journal builder.
+   */
+  public static final class Builder<E> {
+    private static final boolean DEFAULT_FLUSH_ON_COMMIT = false;
+    private static final String DEFAULT_NAME = "atomix";
+    private static final String DEFAULT_DIRECTORY = System.getProperty("user.dir");
+    private static final int DEFAULT_MAX_SEGMENT_SIZE = 1024 * 1024 * 32;
+    private static final int DEFAULT_MAX_ENTRY_SIZE = 1024 * 1024;
+    private static final int DEFAULT_MAX_ENTRIES_PER_SEGMENT = 1024 * 1024;
+    private static final double DEFAULT_INDEX_DENSITY = .005;
+
+    private String name = DEFAULT_NAME;
+    private StorageLevel storageLevel = StorageLevel.DISK;
+    private File directory = new File(DEFAULT_DIRECTORY);
+    private JournalSerdes namespace;
+    private int maxSegmentSize = DEFAULT_MAX_SEGMENT_SIZE;
+    private int maxEntrySize = DEFAULT_MAX_ENTRY_SIZE;
+    private int maxEntriesPerSegment = DEFAULT_MAX_ENTRIES_PER_SEGMENT;
+    private double indexDensity = DEFAULT_INDEX_DENSITY;
+    private boolean flushOnCommit = DEFAULT_FLUSH_ON_COMMIT;
+
+    protected Builder() {
+    }
+
+    /**
+     * Sets the storage name.
+     *
+     * @param name The storage name.
+     * @return The storage builder.
+     */
+    public Builder<E> withName(String name) {
+      this.name = requireNonNull(name, "name cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the log storage level, returning the builder for method chaining.
+     * <p>
+     * The storage level indicates how individual entries should be persisted in the journal.
+     *
+     * @param storageLevel The log storage level.
+     * @return The storage builder.
+     */
+    public Builder<E> withStorageLevel(StorageLevel storageLevel) {
+      this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the log directory, returning the builder for method chaining.
+     * <p>
+     * The log will write segment files into the provided directory.
+     *
+     * @param directory The log directory.
+     * @return The storage builder.
+     * @throws NullPointerException If the {@code directory} is {@code null}
+     */
+    public Builder<E> withDirectory(String directory) {
+      return withDirectory(new File(requireNonNull(directory, "directory cannot be null")));
+    }
+
+    /**
+     * Sets the log directory, returning the builder for method chaining.
+     * <p>
+     * The log will write segment files into the provided directory.
+     *
+     * @param directory The log directory.
+     * @return The storage builder.
+     * @throws NullPointerException If the {@code directory} is {@code null}
+     */
+    public Builder<E> withDirectory(File directory) {
+      this.directory = requireNonNull(directory, "directory cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the journal namespace, returning the builder for method chaining.
+     *
+     * @param namespace The journal serializer.
+     * @return The journal builder.
+     */
+    public Builder<E> withNamespace(JournalSerdes namespace) {
+      this.namespace = requireNonNull(namespace, "namespace cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the maximum segment size in bytes, returning the builder for method chaining.
+     * <p>
+     * The maximum segment size dictates when logs should roll over to new segments. As entries are written to a segment
+     * of the log, once the size of the segment surpasses the configured maximum segment size, the log will create a new
+     * segment and append new entries to that segment.
+     * <p>
+     * By default, the maximum segment size is {@code 1024 * 1024 * 32}.
+     *
+     * @param maxSegmentSize The maximum segment size in bytes.
+     * @return The storage builder.
+     * @throws IllegalArgumentException If the {@code maxSegmentSize} is not greater than {@link JournalSegmentDescriptor#BYTES}
+     */
+    public Builder<E> withMaxSegmentSize(int maxSegmentSize) {
+      checkArgument(maxSegmentSize > JournalSegmentDescriptor.BYTES, "maxSegmentSize must be greater than " + JournalSegmentDescriptor.BYTES);
+      this.maxSegmentSize = maxSegmentSize;
+      return this;
+    }
+
+    /**
+     * Sets the maximum entry size in bytes, returning the builder for method chaining.
+     *
+     * @param maxEntrySize the maximum entry size in bytes
+     * @return the storage builder
+     * @throws IllegalArgumentException if the {@code maxEntrySize} is not positive
+     */
+    public Builder<E> withMaxEntrySize(int maxEntrySize) {
+      checkArgument(maxEntrySize > 0, "maxEntrySize must be positive");
+      this.maxEntrySize = maxEntrySize;
+      return this;
+    }
+
+    /**
+     * Sets the maximum number of allowed entries per segment, returning the builder for method chaining.
+     * <p>
+     * The maximum entry count dictates when logs should roll over to new segments. As entries are written to a segment
+     * of the log, if the entry count in that segment meets the configured maximum entry count, the log will create a
+     * new segment and append new entries to that segment.
+     * <p>
+     * By default, the maximum entries per segment is {@code 1024 * 1024}.
+     *
+     * @param maxEntriesPerSegment The maximum number of entries allowed per segment.
+     * @return The storage builder.
+     * @throws IllegalArgumentException If the {@code maxEntriesPerSegment} is not positive or is greater than the
+     *     default max entries per segment
+     * @deprecated since 3.0.2
+     */
+    @Deprecated
+    public Builder<E> withMaxEntriesPerSegment(int maxEntriesPerSegment) {
+      checkArgument(maxEntriesPerSegment > 0, "max entries per segment must be positive");
+      checkArgument(maxEntriesPerSegment <= DEFAULT_MAX_ENTRIES_PER_SEGMENT,
+          "max entries per segment cannot be greater than " + DEFAULT_MAX_ENTRIES_PER_SEGMENT);
+      this.maxEntriesPerSegment = maxEntriesPerSegment;
+      return this;
+    }
+
+    /**
+     * Sets the journal index density.
+     * <p>
+     * The index density is the frequency at which the position of entries written to the journal will be recorded in an
+     * in-memory index for faster seeking.
+     *
+     * @param indexDensity the index density
+     * @return the journal builder
+     * @throws IllegalArgumentException if the density is not between 0 and 1
+     */
+    public Builder<E> withIndexDensity(double indexDensity) {
+      checkArgument(indexDensity > 0 && indexDensity < 1, "index density must be between 0 and 1");
+      this.indexDensity = indexDensity;
+      return this;
+    }
+
+    /**
+     * Enables flushing buffers to disk when entries are committed to a segment, returning the builder for method
+     * chaining.
+     * <p>
+     * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+     * committed in a given segment.
+     *
+     * @return The storage builder.
+     */
+    public Builder<E> withFlushOnCommit() {
+      return withFlushOnCommit(true);
+    }
+
+    /**
+     * Sets whether to flush buffers to disk when entries are committed to a segment, returning the builder for method
+     * chaining.
+     * <p>
+     * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+     * committed in a given segment.
+     *
+     * @param flushOnCommit Whether to flush buffers to disk when entries are committed to a segment.
+     * @return The storage builder.
+     */
+    public Builder<E> withFlushOnCommit(boolean flushOnCommit) {
+      this.flushOnCommit = flushOnCommit;
+      return this;
+    }
+
+    /**
+     * Build the {@link SegmentedJournal}.
+     *
+     * @return A new {@link SegmentedJournal}.
+     */
+    public SegmentedJournal<E> build() {
+      return new SegmentedJournal<>(
+          name,
+          storageLevel,
+          directory,
+          namespace,
+          maxSegmentSize,
+          maxEntrySize,
+          maxEntriesPerSegment,
+          indexDensity,
+          flushOnCommit);
+    }
+  }
+}
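
An end-to-end sketch of the builder and the reader/writer API above, not taken from the patch: the JournalSerdes instance is assumed to be configured elsewhere (its builder is outside this excerpt), and the directory, names and sizes are illustrative.

    // Usage sketch only; assumes the io.atomix.storage.journal types are on the classpath.
    static void roundTrip(final File dir, final JournalSerdes serdes) {
        final SegmentedJournal<String> journal = SegmentedJournal.<String>builder()
            .withName("example")                 // segment file name prefix
            .withDirectory(dir)
            .withNamespace(serdes)               // serializer registered for String entries (assumed)
            .withStorageLevel(StorageLevel.DISK)
            .withMaxSegmentSize(1024 * 1024)
            .withFlushOnCommit()
            .build();

        final JournalWriter<String> writer = journal.writer();
        final Indexed<String> first = writer.append("a");
        writer.append("b");
        writer.commit(writer.getLastIndex());    // also flushes, because flush-on-commit is enabled

        final JournalReader<String> reader = journal.openReader(first.index(), JournalReader.Mode.COMMITS);
        for (Indexed<String> entry = reader.tryNext(); entry != null; entry = reader.tryNext()) {
            System.out.println(entry.index() + " -> " + entry.entry());
        }
        reader.close();
        journal.close();
    }

Because the reader is opened in COMMITS mode, it only returns entries at or below the commit index set by the writer.
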
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java
new file mode 100644 (file)
index 0000000..cc0fe0d
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A {@link JournalReader} traversing all entries.
+ */
+sealed class SegmentedJournalReader<E> implements JournalReader<E> permits CommitsSegmentJournalReader {
+    final SegmentedJournal<E> journal;
+
+    private JournalSegment<E> currentSegment;
+    private JournalSegmentReader<E> currentReader;
+    private Indexed<E> currentEntry;
+    private long nextIndex;
+
+    SegmentedJournalReader(final SegmentedJournal<E> journal, final JournalSegment<E> segment) {
+        this.journal = requireNonNull(journal);
+        currentSegment = requireNonNull(segment);
+        currentReader = segment.createReader();
+        nextIndex = currentSegment.firstIndex();
+        currentEntry = null;
+    }
+
+    @Override
+    public final long getFirstIndex() {
+        return journal.getFirstSegment().firstIndex();
+    }
+
+    @Override
+    public final Indexed<E> getCurrentEntry() {
+        return currentEntry;
+    }
+
+    @Override
+    public final long getNextIndex() {
+        return nextIndex;
+    }
+
+    @Override
+    public final void reset() {
+        currentReader.close();
+
+        currentSegment = journal.getFirstSegment();
+        currentReader = currentSegment.createReader();
+        nextIndex = currentSegment.firstIndex();
+        currentEntry = null;
+    }
+
+    @Override
+    public final void reset(final long index) {
+        // If the current segment is not open, it has been replaced. Reset the segments.
+        if (!currentSegment.isOpen()) {
+            reset();
+        }
+
+        if (index < nextIndex) {
+            rewind(index);
+        } else if (index > nextIndex) {
+            while (index > nextIndex && tryNext() != null) {
+                // Nothing else
+            }
+        } else {
+            resetCurrentReader(index);
+        }
+    }
+
+    private void resetCurrentReader(final long index) {
+        final var position = currentSegment.lookup(index - 1);
+        if (position != null) {
+            nextIndex = position.index();
+            currentReader.setPosition(position.position());
+        } else {
+            nextIndex = currentSegment.firstIndex();
+            currentReader.setPosition(JournalSegmentDescriptor.BYTES);
+        }
+        while (nextIndex < index && tryNext() != null) {
+            // Nothing else
+        }
+    }
+
+    /**
+     * Rewinds the journal to the given index.
+     */
+    private void rewind(final long index) {
+        if (currentSegment.firstIndex() >= index) {
+            JournalSegment<E> segment = journal.getSegment(index - 1);
+            if (segment != null) {
+                currentReader.close();
+
+                currentSegment = segment;
+                currentReader = currentSegment.createReader();
+            }
+        }
+
+        resetCurrentReader(index);
+    }
+
+    @Override
+    public Indexed<E> tryNext() {
+        var next = currentReader.readEntry(nextIndex);
+        if (next == null) {
+            final var nextSegment = journal.getNextSegment(currentSegment.firstIndex());
+            if (nextSegment == null || nextSegment.firstIndex() != nextIndex) {
+                return null;
+            }
+
+            currentReader.close();
+
+            currentSegment = nextSegment;
+            currentReader = currentSegment.createReader();
+            next = currentReader.readEntry(nextIndex);
+            if (next == null) {
+                return null;
+            }
+        }
+
+        nextIndex = nextIndex + 1;
+        currentEntry = next;
+        return next;
+    }
+
+    @Override
+    public final void close() {
+        currentReader.close();
+        journal.closeReader(this);
+    }
+}
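
A short sketch of the reset-and-drain pattern the reader supports; the reader instance is assumed to come from SegmentedJournal#openReader and the helper is illustrative only.

    // Sketch: re-position an open reader and drain it from the requested index onwards.
    static void replayFrom(final JournalReader<String> reader, final long fromIndex) {
        reader.reset(fromIndex); // rewinds or fast-forwards, whichever direction is needed
        for (var entry = reader.tryNext(); entry != null; entry = reader.tryNext()) {
            // process entry.index() / entry.entry() here
        }
    }
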
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java
new file mode 100644 (file)
index 0000000..a95622e
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+/**
+ * A {@link JournalWriter} backed by a {@link SegmentedJournal}.
+ */
+final class SegmentedJournalWriter<E> implements JournalWriter<E> {
+  private final SegmentedJournal<E> journal;
+  private JournalSegment<E> currentSegment;
+  private JournalSegmentWriter<E> currentWriter;
+
+  SegmentedJournalWriter(SegmentedJournal<E> journal) {
+    this.journal = journal;
+    this.currentSegment = journal.getLastSegment();
+    this.currentWriter = currentSegment.acquireWriter();
+  }
+
+  @Override
+  public long getLastIndex() {
+    return currentWriter.getLastIndex();
+  }
+
+  @Override
+  public Indexed<E> getLastEntry() {
+    return currentWriter.getLastEntry();
+  }
+
+  @Override
+  public long getNextIndex() {
+    return currentWriter.getNextIndex();
+  }
+
+  @Override
+  public void reset(long index) {
+    if (index > currentSegment.firstIndex()) {
+      currentSegment.releaseWriter();
+      currentSegment = journal.resetSegments(index);
+      currentWriter = currentSegment.acquireWriter();
+    } else {
+      truncate(index - 1);
+    }
+    journal.resetHead(index);
+  }
+
+  @Override
+  public void commit(long index) {
+    if (index > journal.getCommitIndex()) {
+      journal.setCommitIndex(index);
+      if (journal.isFlushOnCommit()) {
+        flush();
+      }
+    }
+  }
+
+  @Override
+  public <T extends E> Indexed<T> append(T entry) {
+    var indexed = currentWriter.append(entry);
+    if (indexed != null) {
+      return indexed;
+    }
+
+    //  Slow path: we do not have enough capacity
+    currentWriter.flush();
+    currentSegment.releaseWriter();
+    currentSegment = journal.getNextSegment();
+    currentWriter = currentSegment.acquireWriter();
+    return verifyNotNull(currentWriter.append(entry));
+  }
+
+  @Override
+  public void truncate(long index) {
+    if (index < journal.getCommitIndex()) {
+      throw new IndexOutOfBoundsException("Cannot truncate committed index: " + index);
+    }
+
+    // Delete all segments with first indexes greater than the given index.
+    while (index < currentSegment.firstIndex() && currentSegment != journal.getFirstSegment()) {
+      currentSegment.releaseWriter();
+      journal.removeSegment(currentSegment);
+      currentSegment = journal.getLastSegment();
+      currentWriter = currentSegment.acquireWriter();
+    }
+
+    // Truncate the current index.
+    currentWriter.truncate(index);
+
+    // Reset segment readers.
+    journal.resetTail(index + 1);
+  }
+
+  @Override
+  public void flush() {
+    currentWriter.flush();
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java
new file mode 100644 (file)
index 0000000..0a220ec
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Log exception.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class StorageException extends RuntimeException {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    public StorageException() {
+    }
+
+    public StorageException(final String message) {
+        super(message);
+    }
+
+    public StorageException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    public StorageException(final Throwable cause) {
+        super(cause);
+    }
+
+    /**
+     * Exception thrown when an entry being stored is too large.
+     */
+    public static class TooLarge extends StorageException {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        public TooLarge(final String message) {
+            super(message);
+        }
+
+        public TooLarge(final String message, final Throwable cause) {
+            super(message, cause);
+        }
+    }
+
+    /**
+     * Exception thrown when storage runs out of disk space.
+     */
+    public static class OutOfDiskSpace extends StorageException {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        public OutOfDiskSpace(final String message) {
+            super(message);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java
new file mode 100644 (file)
index 0000000..e76a989
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Storage level configuration values which control how logs are stored on disk or in memory.
+ */
+public enum StorageLevel {
+    /**
+     * Stores data in a memory-mapped file.
+     */
+    MAPPED,
+    /**
+     * Stores data on disk.
+     */
+    DISK
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java
new file mode 100644 (file)
index 0000000..8608e00
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Index of a particular JournalSegment.
+ */
+public interface JournalIndex {
+    /**
+     * Adds an entry for the given index at the given position.
+     *
+     * @param index the index for which to add the entry
+     * @param position the position of the given index
+     */
+    void index(long index, int position);
+
+    /**
+     * Looks up the position of the given index.
+     *
+     * @param index the index to lookup
+     * @return the position of the given index or a lesser index, or {@code null}
+     */
+    @Nullable Position lookup(long index);
+
+    /**
+     * Truncates the index to the given index and returns its position, if available.
+     *
+     * @param index the index to which to truncate the index
+     * @return the position of the given index or a lesser index, or {@code null}
+     */
+    @Nullable Position truncate(long index);
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java
new file mode 100644 (file)
index 0000000..640a8e8
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.Map.Entry;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Journal index position.
+ */
+public record Position(long index, int position) {
+    public Position(final Entry<Long, Integer> entry) {
+        this(entry.getKey(), entry.getValue());
+    }
+
+    public static @Nullable Position ofNullable(final Entry<Long, Integer> entry) {
+        return entry == null ? null : new Position(entry);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java
new file mode 100644 (file)
index 0000000..2b31736
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.TreeMap;
+
+/**
+ * A {@link JournalIndex} maintaining target density.
+ */
+public final class SparseJournalIndex implements JournalIndex {
+    private static final int MIN_DENSITY = 1000;
+
+    private final int density;
+    private final TreeMap<Long, Integer> positions = new TreeMap<>();
+
+    public SparseJournalIndex() {
+        density = MIN_DENSITY;
+    }
+
+    public SparseJournalIndex(final double density) {
+        this.density = (int) Math.ceil(MIN_DENSITY / (density * MIN_DENSITY));
+    }
+
+    @Override
+    public void index(final long index, final int position) {
+        if (index % density == 0) {
+            positions.put(index, position);
+        }
+    }
+
+    @Override
+    public Position lookup(final long index) {
+        return Position.ofNullable(positions.floorEntry(index));
+    }
+
+    @Override
+    public Position truncate(final long index) {
+        positions.tailMap(index, false).clear();
+        return Position.ofNullable(positions.lastEntry());
+    }
+}
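
A sketch illustrating the density arithmetic above: with a density of 0.005 the index records every 200th position, lookups return the closest recorded position at or below the requested index, and truncation drops everything above the given index. The positions used are made-up values.

    // Illustrative walk-through of SparseJournalIndex behaviour; values are arbitrary.
    static void sparseIndexDemo() {
        final JournalIndex index = new SparseJournalIndex(0.005); // ceil(1 / 0.005) = record every 200th index
        index.index(200, 1_024);  // recorded: 200 % 200 == 0
        index.index(250, 2_048);  // skipped:  250 % 200 != 0
        index.index(400, 4_096);  // recorded

        final Position hit = index.lookup(399);    // Position[index=200, position=1024]
        final Position none = index.lookup(150);   // null: nothing recorded at or below 150
        final Position tail = index.truncate(250); // drops 400, returns Position[index=200, position=1024]
        System.out.println(hit + " " + none + " " + tail);
    }
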
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java
new file mode 100644 (file)
index 0000000..c17cabe
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for efficiently managing journal indexes.
+ */
+package io.atomix.storage.journal.index;
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java
new file mode 100644 (file)
index 0000000..7cabd15
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides a low-level journal abstraction for appending to logs and managing segmented logs.
+ */
+package io.atomix.storage.journal;
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java
new file mode 100644 (file)
index 0000000..94fc322
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.io.ByteArrayOutputStream;
+
+/**
+ * Exposes the length of the protected backing byte array of a {@link ByteArrayOutputStream}.
+ */
+final class BufferAwareByteArrayOutputStream extends ByteArrayOutputStream {
+
+  BufferAwareByteArrayOutputStream(int size) {
+    super(size);
+  }
+
+  int getBufferSize() {
+    return buf.length;
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java
new file mode 100644 (file)
index 0000000..6df25b5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Convenience class to avoid extra object allocation and casting.
+ */
+final class ByteArrayOutput extends Output {
+
+  private final BufferAwareByteArrayOutputStream stream;
+
+  ByteArrayOutput(final int bufferSize, final int maxBufferSize, final BufferAwareByteArrayOutputStream stream) {
+    super(bufferSize, maxBufferSize);
+    super.setOutputStream(stream);
+    this.stream = stream;
+  }
+
+  BufferAwareByteArrayOutputStream getByteArrayOutputStream() {
+    return stream;
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java
new file mode 100644 (file)
index 0000000..0508f1e
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class EntrySerializer<T> extends Serializer<T> {
+    // Note: uses identity to create things in Kryo, hence we want an instance for every serdes we wrap
+    private final JavaSerializer javaSerializer = new JavaSerializer();
+    private final EntrySerdes<T> serdes;
+
+    EntrySerializer(final EntrySerdes<T> serdes) {
+        this.serdes = requireNonNull(serdes);
+    }
+
+    @Override
+    public T read(final Kryo kryo, final Input input, final Class<T> type) {
+        try {
+            return serdes.read(new KryoEntryInput(kryo, input, javaSerializer));
+        } catch (IOException e) {
+            throw new KryoException(e);
+        }
+    }
+
+    @Override
+    public void write(final Kryo kryo, final Output output, final T object) {
+        try {
+            serdes.write(new KryoEntryOutput(kryo, output, javaSerializer), object);
+        } catch (IOException e) {
+            throw new KryoException(e);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).addValue(serdes).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java
new file mode 100644 (file)
index 0000000..ed66011
--- /dev/null
@@ -0,0 +1,243 @@
+/* Copyright (c) 2008, Nathan Sweet
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
+ * conditions are met:
+ * 
+ * - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided with the distribution.
+ * - Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import java.nio.ByteBuffer;
+
+/**
+ * A Kryo-4.0.3 ByteBufferInput adapted to deal with
+ * <a href="https://github.com/EsotericSoftware/kryo/issues/505">issue 505</a>.
+ *
+ * @author Roman Levenstein &lt;romixlev@gmail.com&gt;
+ * @author Robert Varga
+ */
+public final class Kryo505ByteBufferInput extends ByteBufferInput {
+       Kryo505ByteBufferInput (ByteBuffer buffer) {
+               super(buffer);
+       }
+
+       @Override
+       public String readString () {
+               niobuffer.position(position);
+               int available = require(1);
+               position++;
+               int b = niobuffer.get();
+               if ((b & 0x80) == 0) return readAscii(); // ASCII.
+               // Null, empty, or UTF8.
+               int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+               switch (charCount) {
+               case 0:
+                       return null;
+               case 1:
+                       return "";
+               }
+               charCount--;
+               if (chars.length < charCount) chars = new char[charCount];
+               readUtf8(charCount);
+               return new String(chars, 0, charCount);
+       }
+
+       private int readUtf8Length (int b) {
+               int result = b & 0x3F; // Mask all but first 6 bits.
+               if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+                       position++;
+                       b = niobuffer.get();
+                       result |= (b & 0x7F) << 6;
+                       if ((b & 0x80) != 0) {
+                               position++;
+                               b = niobuffer.get();
+                               result |= (b & 0x7F) << 13;
+                               if ((b & 0x80) != 0) {
+                                       position++;
+                                       b = niobuffer.get();
+                                       result |= (b & 0x7F) << 20;
+                                       if ((b & 0x80) != 0) {
+                                               position++;
+                                               b = niobuffer.get();
+                                               result |= (b & 0x7F) << 27;
+                                       }
+                               }
+                       }
+               }
+               return result;
+       }
+
+       private int readUtf8Length_slow (int b) {
+               int result = b & 0x3F; // Mask all but first 6 bits.
+               if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+                       require(1);
+                       position++;
+                       b = niobuffer.get();
+                       result |= (b & 0x7F) << 6;
+                       if ((b & 0x80) != 0) {
+                               require(1);
+                               position++;
+                               b = niobuffer.get();
+                               result |= (b & 0x7F) << 13;
+                               if ((b & 0x80) != 0) {
+                                       require(1);
+                                       position++;
+                                       b = niobuffer.get();
+                                       result |= (b & 0x7F) << 20;
+                                       if ((b & 0x80) != 0) {
+                                               require(1);
+                                               position++;
+                                               b = niobuffer.get();
+                                               result |= (b & 0x7F) << 27;
+                                       }
+                               }
+                       }
+               }
+               return result;
+       }
+
+       private void readUtf8 (int charCount) {
+               char[] chars = this.chars;
+               // Try to read 7 bit ASCII chars.
+               int charIndex = 0;
+               int count = Math.min(require(1), charCount);
+               int position = this.position;
+               int b;
+               while (charIndex < count) {
+                       position++;
+                       b = niobuffer.get();
+                       if (b < 0) {
+                               position--;
+                               break;
+                       }
+                       chars[charIndex++] = (char)b;
+               }
+               this.position = position;
+               // If buffer didn't hold all chars or any were not ASCII, use slow path for remainder.
+               if (charIndex < charCount) {
+                       niobuffer.position(position);
+                       readUtf8_slow(charCount, charIndex);
+               }
+       }
+
+       private void readUtf8_slow (int charCount, int charIndex) {
+               char[] chars = this.chars;
+               while (charIndex < charCount) {
+                       if (position == limit) require(1);
+                       position++;
+                       int b = niobuffer.get() & 0xFF;
+                       switch (b >> 4) {
+                       case 0:
+                       case 1:
+                       case 2:
+                       case 3:
+                       case 4:
+                       case 5:
+                       case 6:
+                       case 7:
+                               chars[charIndex] = (char)b;
+                               break;
+                       case 12:
+                       case 13:
+                               if (position == limit) require(1);
+                               position++;
+                               chars[charIndex] = (char)((b & 0x1F) << 6 | niobuffer.get() & 0x3F);
+                               break;
+                       case 14:
+                               require(2);
+                               position += 2;
+                               int b2 = niobuffer.get();
+                               int b3 = niobuffer.get();
+                               chars[charIndex] = (char)((b & 0x0F) << 12 | (b2 & 0x3F) << 6 | b3 & 0x3F);
+                               break;
+                       }
+                       charIndex++;
+               }
+       }
+
+       private String readAscii () {
+               int end = position;
+               int start = end - 1;
+               int limit = this.limit;
+               int b;
+               do {
+                       if (end == limit) return readAscii_slow();
+                       end++;
+                       b = niobuffer.get();
+               } while ((b & 0x80) == 0);
+               int count = end - start;
+               byte[] tmp = new byte[count];
+               niobuffer.position(start);
+               niobuffer.get(tmp);
+               tmp[count - 1] &= 0x7F;  // Mask end of ascii bit.
+               String value = new String(tmp, 0, 0, count);
+               position = end;
+               niobuffer.position(position);
+               return value;
+       }
+
+       private String readAscii_slow () {
+               position--; // Re-read the first byte.
+               // Copy chars currently in buffer.
+               int charCount = limit - position;
+               if (charCount > chars.length) chars = new char[charCount * 2];
+               char[] chars = this.chars;
+               for (int i = position, ii = 0, n = limit; i < n; i++, ii++)
+                       chars[ii] = (char)niobuffer.get(i);
+               position = limit;
+               // Copy additional chars one by one.
+               while (true) {
+                       require(1);
+                       position++;
+                       int b = niobuffer.get();
+                       if (charCount == chars.length) {
+                               char[] newChars = new char[charCount * 2];
+                               System.arraycopy(chars, 0, newChars, 0, charCount);
+                               chars = newChars;
+                               this.chars = newChars;
+                       }
+                       if ((b & 0x80) == 0x80) {
+                               chars[charCount++] = (char)(b & 0x7F);
+                               break;
+                       }
+                       chars[charCount++] = (char)b;
+               }
+               return new String(chars, 0, charCount);
+       }
+
+       @Override
+       public StringBuilder readStringBuilder () {
+               niobuffer.position(position);
+               int available = require(1);
+               position++;
+               int b = niobuffer.get();
+               if ((b & 0x80) == 0) return new StringBuilder(readAscii()); // ASCII.
+               // Null, empty, or UTF8.
+               int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+               switch (charCount) {
+               case 0:
+                       return null;
+               case 1:
+                       return new StringBuilder("");
+               }
+               charCount--;
+               if (chars.length < charCount) chars = new char[charCount];
+               readUtf8(charCount);
+               StringBuilder builder = new StringBuilder(charCount);
+               builder.append(chars, 0, charCount);
+               return builder;
+       }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java
new file mode 100644 (file)
index 0000000..2a98f16
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import java.io.IOException;
+
+final class KryoEntryInput implements EntryInput {
+    private final Kryo kryo;
+    private final Input input;
+    private final JavaSerializer javaSerializer;
+
+    KryoEntryInput(final Kryo kryo, final Input input, final JavaSerializer javaSerializer) {
+        this.kryo = requireNonNull(kryo);
+        this.input = requireNonNull(input);
+        this.javaSerializer = requireNonNull(javaSerializer);
+    }
+
+    @Override
+    public byte[] readBytes(final int length) throws IOException {
+        try {
+            return input.readBytes(length);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public long readLong() throws IOException {
+        try {
+            return input.readLong(false);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public Object readObject() throws IOException {
+        try {
+            return javaSerializer.read(kryo, input, null);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public String readString() throws IOException {
+        try {
+            return input.readString();
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public int readVarInt() throws IOException {
+        try {
+            return input.readVarInt(true);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java
new file mode 100644 (file)
index 0000000..90886dd
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import java.io.IOException;
+
+final class KryoEntryOutput implements EntryOutput {
+    private final Kryo kryo;
+    private final Output output;
+    private final JavaSerializer javaSerializer;
+
+    KryoEntryOutput(final Kryo kryo, final Output output, final JavaSerializer javaSerializer) {
+        this.kryo = requireNonNull(kryo);
+        this.output = requireNonNull(output);
+        this.javaSerializer = requireNonNull(javaSerializer);
+    }
+
+    @Override
+    public void writeBytes(final byte[] bytes) throws IOException {
+        try {
+            output.writeBytes(bytes);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeLong(final long value) throws IOException {
+        try {
+            output.writeLong(value, false);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeObject(final Object value) throws IOException {
+        try {
+            javaSerializer.write(kryo, output, value);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeString(final String value) throws IOException {
+        try {
+            output.writeString(value);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeVarInt(final int value) throws IOException {
+        try {
+            output.writeVarInt(value, true);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+}
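The two adapters above are the only surface an EntrySerdes implementation sees. As a minimal sketch (not part of the patch), a serdes for a hypothetical NamedValue entry could be written against that surface as follows, using only readString/readLong and their write counterparts:

// Hypothetical entry type and serdes, shown for illustration only.
record NamedValue(String name, long value) {
}

final class NamedValueSerdes implements JournalSerdes.EntrySerdes<NamedValue> {
    @Override
    public NamedValue read(final JournalSerdes.EntryInput input) throws IOException {
        // Read fields in the same order they were written
        return new NamedValue(input.readString(), input.readLong());
    }

    @Override
    public void write(final JournalSerdes.EntryOutput output, final NamedValue entry) throws IOException {
        output.writeString(entry.name());
        output.writeLong(entry.value());
    }
}

The ByteArraySerdes test class later in this patch follows the same pattern for raw byte arrays.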
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java
new file mode 100644 (file)
index 0000000..6324631
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.lang.ref.SoftReference;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Function;
+
+abstract class KryoIOPool<T> {
+
+  private final ConcurrentLinkedQueue<SoftReference<T>> queue = new ConcurrentLinkedQueue<>();
+
+  private T borrow(final int bufferSize) {
+    T element;
+    SoftReference<T> reference;
+    while ((reference = queue.poll()) != null) {
+      if ((element = reference.get()) != null) {
+        return element;
+      }
+    }
+    return create(bufferSize);
+  }
+
+  protected abstract T create(final int bufferSize);
+
+  protected abstract boolean recycle(final T element);
+
+  <R> R run(final Function<T, R> function, final int bufferSize) {
+    final T element = borrow(bufferSize);
+    try {
+      return function.apply(element);
+    } finally {
+      if (recycle(element)) {
+        queue.offer(new SoftReference<>(element));
+      }
+    }
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java
new file mode 100644 (file)
index 0000000..0eeb8df
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+
+class KryoInputPool extends KryoIOPool<Input> {
+
+  static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+  @Override
+  protected Input create(int bufferSize) {
+    return new Input(bufferSize);
+  }
+
+  @Override
+  protected boolean recycle(Input input) {
+    if (input.getBuffer().length < MAX_POOLED_BUFFER_SIZE) {
+      input.setInputStream(null);
+      return true;
+    }
+    return false; // discard
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java
new file mode 100644 (file)
index 0000000..64f3538
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Registration;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import com.esotericsoftware.kryo.io.ByteBufferOutput;
+import com.esotericsoftware.kryo.pool.KryoCallback;
+import com.esotericsoftware.kryo.pool.KryoFactory;
+import com.esotericsoftware.kryo.pool.KryoPool;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import org.objenesis.strategy.StdInstantiatorStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Pool of Kryo instances, with classes pre-registered.
+ */
+final class KryoJournalSerdes implements JournalSerdes, KryoFactory, KryoPool {
+    /**
+     * Default buffer size used for serialization.
+     *
+     * @see #serialize(Object)
+     */
+    private static final int DEFAULT_BUFFER_SIZE = 4096;
+
+    /**
+     * Smallest ID free to use for user defined registrations.
+     */
+    private static final int INITIAL_ID = 16;
+
+    static final String NO_NAME = "(no name)";
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(KryoJournalSerdes.class);
+
+    private final KryoPool kryoPool = new KryoPool.Builder(this).softReferences().build();
+
+    private final KryoOutputPool kryoOutputPool = new KryoOutputPool();
+    private final KryoInputPool kryoInputPool = new KryoInputPool();
+
+    private final List<RegisteredType> registeredTypes;
+    private final ClassLoader classLoader;
+    private final String friendlyName;
+
+    /**
+     * Creates a Kryo instance pool.
+     *
+     * @param registeredTypes      types to register
+     * @param classLoader          class loader used by the created Kryo instances
+     * @param friendlyName         friendly name for the namespace
+     */
+    KryoJournalSerdes(
+            final List<RegisteredType> registeredTypes,
+            final ClassLoader classLoader,
+            final String friendlyName) {
+        this.registeredTypes = List.copyOf(registeredTypes);
+        this.classLoader = requireNonNull(classLoader);
+        this.friendlyName = requireNonNull(friendlyName);
+
+        // Pre-populate with a single instance
+        release(create());
+    }
+
+    @Override
+    public byte[] serialize(final Object obj) {
+        return serialize(obj, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public byte[] serialize(final Object obj, final int bufferSize) {
+        return kryoOutputPool.run(output -> kryoPool.run(kryo -> {
+            kryo.writeClassAndObject(output, obj);
+            output.flush();
+            return output.getByteArrayOutputStream().toByteArray();
+        }), bufferSize);
+    }
+
+    @Override
+    public void serialize(final Object obj, final ByteBuffer buffer) {
+        ByteBufferOutput out = new ByteBufferOutput(buffer);
+        Kryo kryo = borrow();
+        try {
+            kryo.writeClassAndObject(out, obj);
+            out.flush();
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public void serialize(final Object obj, final OutputStream stream) {
+        serialize(obj, stream, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public void serialize(final Object obj, final OutputStream stream, final int bufferSize) {
+        ByteBufferOutput out = new ByteBufferOutput(stream, bufferSize);
+        Kryo kryo = borrow();
+        try {
+            kryo.writeClassAndObject(out, obj);
+            out.flush();
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public <T> T deserialize(final byte[] bytes) {
+        return kryoInputPool.run(input -> {
+            input.setInputStream(new ByteArrayInputStream(bytes));
+            return kryoPool.run(kryo -> {
+                @SuppressWarnings("unchecked")
+                T obj = (T) kryo.readClassAndObject(input);
+                return obj;
+            });
+        }, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public <T> T deserialize(final ByteBuffer buffer) {
+        Kryo kryo = borrow();
+        try {
+            @SuppressWarnings("unchecked")
+            T obj = (T) kryo.readClassAndObject(new Kryo505ByteBufferInput(buffer));
+            return obj;
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public <T> T deserialize(final InputStream stream) {
+        return deserialize(stream, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public <T> T deserialize(final InputStream stream, final int bufferSize) {
+        Kryo kryo = borrow();
+        try {
+            @SuppressWarnings("unchecked")
+            T obj = (T) kryo.readClassAndObject(new ByteBufferInput(stream, bufferSize));
+            return obj;
+        } finally {
+            release(kryo);
+        }
+    }
+
+    /**
+     * Creates a Kryo instance.
+     *
+     * @return Kryo instance
+     */
+    @Override
+    public Kryo create() {
+        LOGGER.trace("Creating Kryo instance for {}", this);
+        Kryo kryo = new Kryo();
+        kryo.setClassLoader(classLoader);
+        kryo.setRegistrationRequired(true);
+
+        // TODO rethink whether we want to use StdInstantiatorStrategy
+        kryo.setInstantiatorStrategy(
+            new Kryo.DefaultInstantiatorStrategy(new StdInstantiatorStrategy()));
+
+        int id = INITIAL_ID;
+        for (RegisteredType registeredType : registeredTypes) {
+            register(kryo, registeredType.types(), registeredType.serializer(), id++);
+        }
+        return kryo;
+    }
+
+    /**
+     * Register {@code type} and {@code serializer} to {@code kryo} instance.
+     *
+     * @param kryo       Kryo instance
+     * @param types      types to register
+     * @param serializer Specific serializer to register or null to use default.
+     * @param id         type registration id to use
+     */
+    private void register(final Kryo kryo, final Class<?>[] types, final Serializer<?> serializer, final int id) {
+        Registration existing = kryo.getRegistration(id);
+        if (existing != null) {
+            boolean matches = false;
+            for (Class<?> type : types) {
+                if (existing.getType() == type) {
+                    matches = true;
+                    break;
+                }
+            }
+
+            if (!matches) {
+                LOGGER.error("{}: Failed to register {} as {}, {} was already registered.",
+                    friendlyName, types, id, existing.getType());
+
+                throw new IllegalStateException(String.format(
+                    "Failed to register %s as %s, %s was already registered.",
+                    Arrays.toString(types), id, existing.getType()));
+            }
+            // Fall through to the register call for now. Consider skipping it if there is
+            // a reasonable way to compare serializer equivalence.
+        }
+
+        for (Class<?> type : types) {
+            Registration r = null;
+            if (serializer == null) {
+                r = kryo.register(type, id);
+            } else if (type.isInterface()) {
+                kryo.addDefaultSerializer(type, serializer);
+            } else {
+                r = kryo.register(type, serializer, id);
+            }
+            if (r != null) {
+                if (r.getId() != id) {
+                    LOGGER.debug("{}: {} already registered as {}. Skipping {}.",
+                        friendlyName, r.getType(), r.getId(), id);
+                }
+                LOGGER.trace("{} registered as {}", r.getType(), r.getId());
+            }
+        }
+    }
+
+    @Override
+    public Kryo borrow() {
+        return kryoPool.borrow();
+    }
+
+    @Override
+    public void release(final Kryo kryo) {
+        kryoPool.release(kryo);
+    }
+
+    @Override
+    public <T> T run(final KryoCallback<T> callback) {
+        return kryoPool.run(callback);
+    }
+
+    @Override
+    public String toString() {
+        if (!NO_NAME.equals(friendlyName)) {
+            return MoreObjects.toStringHelper(getClass())
+                .omitNullValues()
+                .add("friendlyName", friendlyName)
+                // omit lengthy detail, when there's a name
+                .toString();
+        }
+        return MoreObjects.toStringHelper(getClass()).add("registeredTypes", registeredTypes).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java
new file mode 100644 (file)
index 0000000..a62d8b3
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalSerdes.Builder;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class KryoJournalSerdesBuilder implements Builder {
+    private final List<RegisteredType> types = new ArrayList<>();
+    private ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
+    @Override
+    public KryoJournalSerdesBuilder register(final EntrySerdes<?> serdes, final Class<?>... classes) {
+        types.add(new RegisteredType(new EntrySerializer<>(serdes), classes));
+        return this;
+    }
+
+    @Override
+    public KryoJournalSerdesBuilder setClassLoader(final ClassLoader classLoader) {
+        this.classLoader = requireNonNull(classLoader);
+        return this;
+    }
+
+    @Override
+    public JournalSerdes build() {
+        return build(KryoJournalSerdes.NO_NAME);
+    }
+
+    @Override
+    public JournalSerdes build(final String friendlyName) {
+        checkState(!types.isEmpty(), "No serializers registered");
+        return new KryoJournalSerdes(types, classLoader, friendlyName);
+    }
+}
\ No newline at end of file
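Taken together with the Kryo pool above, building and using a JournalSerdes comes down to the builder calls below. This is a minimal round-trip sketch, not part of the patch, reusing the hypothetical NamedValue and NamedValueSerdes from the earlier sketch; build(String) merely supplies a friendly name used in logging and toString():

// Minimal sketch, not part of the patch: serialize one entry and read it back.
JournalSerdes serdes = JournalSerdes.builder()
    .register(new NamedValueSerdes(), NamedValue.class)
    .build("example");

byte[] bytes = serdes.serialize(new NamedValue("segment", 42L));
NamedValue copy = serdes.deserialize(bytes);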
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java
new file mode 100644 (file)
index 0000000..6b1737f
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+class KryoOutputPool extends KryoIOPool<ByteArrayOutput> {
+
+  private static final int MAX_BUFFER_SIZE = 768 * 1024;
+  static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+  @Override
+  protected ByteArrayOutput create(int bufferSize) {
+    return new ByteArrayOutput(bufferSize, MAX_BUFFER_SIZE, new BufferAwareByteArrayOutputStream(bufferSize));
+  }
+
+  @Override
+  protected boolean recycle(ByteArrayOutput output) {
+    if (output.getByteArrayOutputStream().getBufferSize() < MAX_POOLED_BUFFER_SIZE) {
+      output.getByteArrayOutputStream().reset();
+      output.clear();
+      return true;
+    }
+    return false; // discard
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java
new file mode 100644 (file)
index 0000000..0a17c09
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+record RegisteredType(EntrySerializer<?> serializer, Class<?>[] types) {
+    RegisteredType {
+        requireNonNull(serializer);
+        requireNonNull(types);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java
new file mode 100644 (file)
index 0000000..afc8022
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for binary serialization.
+ */
+package io.atomix.utils.serializer;
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java
new file mode 100644 (file)
index 0000000..14e59e5
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2017-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Base journal test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+@RunWith(Parameterized.class)
+public abstract class AbstractJournalTest {
+    private static final JournalSerdes NAMESPACE = JournalSerdes.builder()
+        .register(new TestEntrySerdes(), TestEntry.class)
+        .register(new ByteArraySerdes(), byte[].class)
+        .build();
+
+    protected static final TestEntry ENTRY = new TestEntry(32);
+    private static final Path PATH = Paths.get("target/test-logs/");
+
+    private final StorageLevel storageLevel;
+    private final int maxSegmentSize;
+    protected final int entriesPerSegment;
+
+    protected AbstractJournalTest(final StorageLevel storageLevel, final int maxSegmentSize) {
+        this.storageLevel = storageLevel;
+        this.maxSegmentSize = maxSegmentSize;
+        int entryLength = NAMESPACE.serialize(ENTRY).length + 8;
+        entriesPerSegment = (maxSegmentSize - 64) / entryLength;
+    }
+
+    @Parameterized.Parameters
+    public static List<Object[]> primeNumbers() {
+        var runs = new ArrayList<Object[]>();
+        for (int i = 1; i <= 10; i++) {
+            for (int j = 1; j <= 10; j++) {
+                runs.add(new Object[] { 64 + i * (NAMESPACE.serialize(ENTRY).length + 8) + j });
+            }
+        }
+        return runs;
+    }
+
+    protected SegmentedJournal<TestEntry> createJournal() {
+        return SegmentedJournal.<TestEntry>builder()
+            .withName("test")
+            .withDirectory(PATH.toFile())
+            .withNamespace(NAMESPACE)
+            .withStorageLevel(storageLevel)
+            .withMaxSegmentSize(maxSegmentSize)
+            .withIndexDensity(.2)
+            .build();
+    }
+
+    @Test
+    public void testCloseMultipleTimes() {
+        // given
+        final Journal<TestEntry> journal = createJournal();
+
+        // when
+        journal.close();
+
+        // then
+        journal.close();
+    }
+
+    @Test
+    public void testWriteRead() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            // Append a couple entries.
+            assertEquals(1, writer.getNextIndex());
+            var indexed = writer.append(ENTRY);
+            assertEquals(1, indexed.index());
+
+            assertEquals(2, writer.getNextIndex());
+            writer.append(ENTRY);
+            reader.reset(2);
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(2, indexed.index());
+            assertNull(reader.tryNext());
+
+            // Test reading an entry
+            reader.reset();
+            var entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            // Test reading a second entry
+            assertEquals(2, reader.getNextIndex());
+            var entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertEquals(3, reader.getNextIndex());
+            assertNull(reader.tryNext());
+
+            // Test opening a new reader and reading from the journal.
+            reader = journal.openReader(1);
+            entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+
+            // Reset the reader.
+            reader.reset();
+
+            // Test opening a new reader and reading from the journal.
+            reader = journal.openReader(1);
+            entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+
+            // Truncate the journal and write a different entry.
+            writer.truncate(1);
+            assertEquals(2, writer.getNextIndex());
+            writer.append(ENTRY);
+            reader.reset(2);
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(2, indexed.index());
+
+            // Reset the reader to a specific index and read the last entry again.
+            reader.reset(2);
+
+            final var current = reader.getCurrentEntry();
+            assertNotNull(current);
+            assertEquals(1, current.index());
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+        }
+    }
+
+    @Test
+    public void testResetTruncateZero() throws Exception {
+        try (SegmentedJournal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+            writer.append(ENTRY);
+            writer.reset(1);
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+
+            var indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+            writer.reset(1);
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+            assertEquals(1, writer.getLastIndex());
+            assertEquals(1, writer.getLastEntry().index());
+
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+
+            writer.truncate(0);
+            assertEquals(0, writer.getLastIndex());
+            assertNull(writer.getLastEntry());
+            writer.append(ENTRY);
+            assertEquals(1, writer.getLastIndex());
+            assertEquals(1, writer.getLastEntry().index());
+
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+        }
+    }
+
+    @Test
+    public void testTruncateRead() throws Exception {
+        int i = 10;
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            for (int j = 1; j <= i; j++) {
+                assertEquals(j, writer.append(new TestEntry(32)).index());
+            }
+
+            for (int j = 1; j <= i - 2; j++) {
+                final var indexed = reader.tryNext();
+                assertNotNull(indexed);
+                assertEquals(j, indexed.index());
+            }
+
+            writer.truncate(i - 2);
+
+            assertNull(reader.tryNext());
+            assertEquals(i - 1, writer.append(new TestEntry(32)).index());
+            assertEquals(i, writer.append(new TestEntry(32)).index());
+
+            Indexed<TestEntry> entry = reader.tryNext();
+            assertNotNull(entry);
+            assertEquals(i - 1, entry.index());
+            entry = reader.tryNext();
+            assertNotNull(entry);
+            assertEquals(i, entry.index());
+        }
+    }
+
+    @Test
+    public void testWriteReadEntries() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            for (int i = 1; i <= entriesPerSegment * 5; i++) {
+                writer.append(ENTRY);
+                var entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+
+                if (i > 6) {
+                    reader.reset(i - 5);
+                    final var current = reader.getCurrentEntry();
+                    assertNotNull(current);
+                    assertEquals(i - 6, current.index());
+                    assertEquals(i - 5, reader.getNextIndex());
+                    reader.reset(i + 1);
+                }
+
+                writer.truncate(i - 1);
+                writer.append(ENTRY);
+
+                assertNotNull(reader.tryNext());
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+            }
+        }
+    }
+
+    @Test
+    public void testWriteReadCommittedEntries() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+            for (int i = 1; i <= entriesPerSegment * 5; i++) {
+                writer.append(ENTRY);
+                assertNull(reader.tryNext());
+                writer.commit(i);
+                var entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+            }
+        }
+    }
+
+    @Test
+    public void testReadAfterCompact() throws Exception {
+        try (SegmentedJournal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> uncommittedReader = journal.openReader(1, JournalReader.Mode.ALL);
+            JournalReader<TestEntry> committedReader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+            for (int i = 1; i <= entriesPerSegment * 10; i++) {
+                assertEquals(i, writer.append(ENTRY).index());
+            }
+
+            assertEquals(1, uncommittedReader.getNextIndex());
+            assertEquals(1, committedReader.getNextIndex());
+
+            // This creates asymmetry, as the uncommitted reader will move one step ahead...
+            assertNotNull(uncommittedReader.tryNext());
+            assertEquals(2, uncommittedReader.getNextIndex());
+            assertNull(committedReader.tryNext());
+            assertEquals(1, committedReader.getNextIndex());
+
+            writer.commit(entriesPerSegment * 9);
+
+            // ... so here we catch up ...
+            assertNotNull(committedReader.tryNext());
+            assertEquals(2, committedReader.getNextIndex());
+
+            // ... and continue from the second entry
+            for (int i = 2; i <= entriesPerSegment * 2.5; i++) {
+                var entry = uncommittedReader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+
+                entry = committedReader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+            }
+
+            journal.compact(entriesPerSegment * 5 + 1);
+
+            assertNull(uncommittedReader.getCurrentEntry());
+            assertEquals(entriesPerSegment * 5 + 1, uncommittedReader.getNextIndex());
+            var entry = uncommittedReader.tryNext();
+            assertNotNull(entry);
+            assertEquals(entriesPerSegment * 5 + 1, entry.index());
+
+            assertNull(committedReader.getCurrentEntry());
+            assertEquals(entriesPerSegment * 5 + 1, committedReader.getNextIndex());
+            entry = committedReader.tryNext();
+            assertNotNull(entry);
+            assertEquals(entriesPerSegment * 5 + 1, entry.index());
+        }
+    }
+
+    /**
+     * Tests reading from a compacted journal.
+     */
+    @Test
+    public void testCompactAndRecover() throws Exception {
+        try (var journal = createJournal()) {
+            // Write three segments to the journal.
+            final var writer = journal.writer();
+            for (int i = 0; i < entriesPerSegment * 3; i++) {
+                writer.append(ENTRY);
+            }
+
+            // Commit the entries and compact the first segment.
+            writer.commit(entriesPerSegment * 3);
+            journal.compact(entriesPerSegment + 1);
+        }
+
+        // Reopen the journal and create a reader.
+        try (var journal = createJournal()) {
+            final var writer = journal.writer();
+            final var reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+            writer.append(ENTRY);
+            writer.append(ENTRY);
+            writer.commit(entriesPerSegment * 3);
+
+            // Ensure the reader starts at the first physical index in the journal.
+            assertEquals(entriesPerSegment + 1, reader.getNextIndex());
+            assertEquals(reader.getFirstIndex(), reader.getNextIndex());
+            final var indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(entriesPerSegment + 1, indexed.index());
+            assertEquals(entriesPerSegment + 2, reader.getNextIndex());
+        }
+    }
+
+    @Before
+    @After
+    public void cleanupStorage() throws IOException {
+        if (Files.exists(PATH)) {
+            Files.walkFileTree(PATH, new SimpleFileVisitor<Path>() {
+                @Override
+                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
+                    Files.delete(file);
+                    return FileVisitResult.CONTINUE;
+                }
+
+                @Override
+                public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
+                    Files.delete(dir);
+                    return FileVisitResult.CONTINUE;
+                }
+            });
+        }
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java
new file mode 100644 (file)
index 0000000..79ce909
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class ByteArraySerdes implements EntrySerdes<byte[]> {
+    @Override
+    public byte[] read(final EntryInput input) throws IOException {
+        int length = input.readVarInt();
+        return length == 0 ? null : input.readBytes(length - 1);
+    }
+
+    @Override
+    public void write(final EntryOutput output, final byte[] entry) throws IOException {
+        if (entry != null) {
+            output.writeVarInt(entry.length + 1);
+            output.writeBytes(entry);
+        } else {
+            output.writeVarInt(0);
+        }
+    }
+}
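
ByteArraySerdes above uses an offset-by-one length prefix so that null and an empty array stay
distinguishable: a varint of 0 means "no entry", and any other value p carries p - 1 payload bytes. A
plain-Java sketch of that convention (illustrative only; prefixFor and payloadLength are hypothetical
helpers, not part of the patch):

    // prefix 0      -> null entry
    // prefix p > 0  -> p - 1 payload bytes, so an empty byte[] round-trips as prefix 1
    static int prefixFor(final byte[] entry) {
        return entry == null ? 0 : entry.length + 1;
    }

    static int payloadLength(final int prefix) {
        return prefix - 1;    // only meaningful when prefix > 0; a prefix of 0 means null
    }
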
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java
new file mode 100644 (file)
index 0000000..11cbd6c
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Disk journal test.
+ */
+public class DiskJournalTest extends AbstractJournalTest {
+    public DiskJournalTest(final int maxSegmentSize) {
+        super(StorageLevel.DISK, maxSegmentSize);
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java
new file mode 100644 (file)
index 0000000..6db959d
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Segment descriptor test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class JournalSegmentDescriptorTest {
+
+  /**
+   * Tests the segment descriptor builder.
+   */
+  @Test
+  public void testDescriptorBuilder() {
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES))
+        .withId(2)
+        .withIndex(1025)
+        .withMaxSegmentSize(1024 * 1024)
+        .withMaxEntries(2048)
+        .build();
+
+    assertEquals(2, descriptor.id());
+    assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+    assertEquals(1025, descriptor.index());
+    assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+    assertEquals(2048, descriptor.maxEntries());
+
+    assertEquals(0, descriptor.updated());
+    long time = System.currentTimeMillis();
+    descriptor.update(time);
+    assertEquals(time, descriptor.updated());
+  }
+
+  /**
+   * Tests copying the segment descriptor.
+   */
+  @Test
+  public void testDescriptorCopy() {
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(2)
+        .withIndex(1025)
+        .withMaxSegmentSize(1024 * 1024)
+        .withMaxEntries(2048)
+        .build();
+
+    long time = System.currentTimeMillis();
+    descriptor.update(time);
+
+    descriptor = descriptor.copyTo(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES));
+
+    assertEquals(2, descriptor.id());
+    assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+    assertEquals(1025, descriptor.index());
+    assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+    assertEquals(2048, descriptor.maxEntries());
+    assertEquals(time, descriptor.updated());
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java
new file mode 100644 (file)
index 0000000..114ae09
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Journal segment file test.
+ */
+public class JournalSegmentFileTest {
+
+  @Test
+  public void testIsSegmentFile() throws Exception {
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1.log"));
+    assertFalse(JournalSegmentFile.isSegmentFile("foo", "bar-1.log"));
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1-1.log"));
+  }
+
+  @Test
+  public void testCreateSegmentFile() throws Exception {
+    File file = JournalSegmentFile.createSegmentFile("foo", new File(System.getProperty("user.dir")), 1);
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", file));
+  }
+
+}
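
The assertions above pin the naming scheme down only indirectly: a segment file for journal "foo" has to
start with "foo-" (hence "bar-1.log" is rejected), and extra dash-separated components after that prefix are
tolerated (hence "foo-1-1.log" is accepted). A sketch of the name createSegmentFile is expected to produce,
illustrative only; the authoritative rules live in JournalSegmentFile, added earlier in this patch:

    // Expected shape: "<journal name>-<segment id>.log", e.g. "foo-1.log".
    final File segment = JournalSegmentFile.createSegmentFile(
        "foo", new File(System.getProperty("user.dir")), 1);
    // JournalSegmentFile.isSegmentFile("foo", segment) accepts this file, as the second test asserts.
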
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java
new file mode 100644 (file)
index 0000000..286c6df
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Memory mapped journal test.
+ */
+public class MappedJournalTest extends AbstractJournalTest {
+    public MappedJournalTest(final int maxSegmentSize) {
+        super(StorageLevel.MAPPED, maxSegmentSize);
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java
new file mode 100644 (file)
index 0000000..b549362
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.util.Arrays;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Test entry.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class TestEntry {
+  private final byte[] bytes;
+
+  public TestEntry(int size) {
+    this(new byte[size]);
+  }
+
+  public TestEntry(byte[] bytes) {
+    this.bytes = bytes;
+  }
+
+  public byte[] bytes() {
+    return bytes;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("length", bytes.length)
+        .add("hash", Arrays.hashCode(bytes))
+        .toString();
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java
new file mode 100644 (file)
index 0000000..8b04539
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class TestEntrySerdes implements EntrySerdes<TestEntry> {
+    private static final ByteArraySerdes BA_SERIALIZER = new ByteArraySerdes();
+
+    @Override
+    public TestEntry read(final EntryInput input) throws IOException {
+        return new TestEntry(BA_SERIALIZER.read(input));
+    }
+
+    @Override
+    public void write(final EntryOutput output, final TestEntry entry) throws IOException {
+        BA_SERIALIZER.write(output, entry.bytes());
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java
new file mode 100644 (file)
index 0000000..b7cd38a
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Sparse journal index test.
+ */
+public class SparseJournalIndexTest {
+  @Test
+  public void testSparseJournalIndex() throws Exception {
+    JournalIndex index = new SparseJournalIndex(.2);
+    assertNull(index.lookup(1));
+    index.index(1, 2);
+    assertNull(index.lookup(1));
+    index.index(2, 4);
+    index.index(3, 6);
+    index.index(4, 8);
+    index.index(5, 10);
+    assertEquals(new Position(5, 10), index.lookup(5));
+    index.index(6, 12);
+    index.index(7, 14);
+    index.index(8, 16);
+    assertEquals(new Position(5, 10), index.lookup(8));
+    index.index(9, 18);
+    index.index(10, 20);
+    assertEquals(new Position(10, 20), index.lookup(10));
+    index.truncate(8);
+    assertEquals(new Position(5, 10), index.lookup(8));
+    assertEquals(new Position(5, 10), index.lookup(10));
+    index.truncate(4);
+    assertNull(index.lookup(4));
+    assertNull(index.lookup(8));
+
+    index = new SparseJournalIndex(.2);
+    assertNull(index.lookup(100));
+    index.index(101, 2);
+    assertNull(index.lookup(1));
+    index.index(102, 4);
+    index.index(103, 6);
+    index.index(104, 8);
+    index.index(105, 10);
+    assertEquals(new Position(105, 10), index.lookup(105));
+    index.index(106, 12);
+    index.index(107, 14);
+    index.index(108, 16);
+    assertEquals(new Position(105, 10), index.lookup(108));
+    index.index(109, 18);
+    index.index(110, 20);
+    assertEquals(new Position(110, 20), index.lookup(110));
+    index.truncate(108);
+    assertEquals(new Position(105, 10), index.lookup(108));
+    assertEquals(new Position(105, 10), index.lookup(110));
+    index.truncate(104);
+    assertNull(index.lookup(104));
+    assertNull(index.lookup(108));
+  }
+}
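
All of the expected positions above follow from the density argument alone: a density of .2 makes the index
keep roughly one position per 1 / 0.2 = 5 entries, so only indexes divisible by 5 (5, 10, 105, 110, ...) are
recorded, and a lookup floors to the last recorded index at or below the requested one. Sketched out
(illustrative only; the stride and the flooring rule are inferred from the assertions, not quoted from
SparseJournalIndex):

    final double density = 0.2;
    final int stride = (int) Math.round(1 / density);   // 5: one recorded position every 5 indexes
    // index(5, 10) and index(10, 20) are recorded; index(1..4) and index(6..9) are not.
    // lookup(1)  -> null               (nothing recorded at or below 1)
    // lookup(8)  -> Position(5, 10)    (floor to the last recorded index <= 8)
    // lookup(10) -> Position(10, 20)
    // truncate(8) drops recorded positions above 8, so lookup(10) now floors to Position(5, 10);
    // truncate(4) drops the rest, so lookup(4) and lookup(8) return null again.
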
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java
new file mode 100644 (file)
index 0000000..a47d378
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class BufferAwareByteArrayOutputStreamTest {
+
+  @Test
+  public void testBufferSize() throws Exception {
+    BufferAwareByteArrayOutputStream outputStream = new BufferAwareByteArrayOutputStream(8);
+    assertEquals(8, outputStream.getBufferSize());
+    outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+    assertEquals(8, outputStream.getBufferSize());
+    outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+    assertEquals(16, outputStream.getBufferSize());
+    outputStream.reset();
+    assertEquals(16, outputStream.getBufferSize());
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java
new file mode 100644 (file)
index 0000000..82a9629
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class KryoInputPoolTest {
+
+  private KryoInputPool kryoInputPool;
+
+  @Before
+  public void setUp() throws Exception {
+    kryoInputPool = new KryoInputPool();
+  }
+
+  @Test
+  public void discardOutput() {
+    final Input[] result = new Input[2];
+    kryoInputPool.run(input -> {
+      result[0] = input;
+      return null;
+    }, KryoInputPool.MAX_POOLED_BUFFER_SIZE + 1);
+    kryoInputPool.run(input -> {
+      result[1] = input;
+      return null;
+    }, 0);
+    assertTrue(result[0] != result[1]);
+  }
+
+  @Test
+  public void recycleOutput() {
+    final Input[] result = new Input[2];
+    kryoInputPool.run(input -> {
+      assertEquals(0, input.position());
+      byte[] payload = new byte[]{1, 2, 3, 4};
+      input.setBuffer(payload);
+      assertArrayEquals(payload, input.readBytes(4));
+      result[0] = input;
+      return null;
+    }, 0);
+    assertNull(result[0].getInputStream());
+    assertEquals(0, result[0].position());
+    kryoInputPool.run(input -> {
+      result[1] = input;
+      return null;
+    }, 0);
+    assertTrue(result[0] == result[1]);
+  }
+}
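
Taken together, the two tests above describe the pool's contract: run() lends the caller a pooled Kryo Input,
and that Input is returned for reuse only when the requested size does not exceed
KryoInputPool.MAX_POOLED_BUFFER_SIZE; larger buffers are discarded after use so the pool cannot retain them.
A short usage sketch, assuming only the run(callback, bufferSize) shape exercised in the tests (illustrative
only, not part of the patch):

    final var pool = new KryoInputPool();
    final byte[] payload = {1, 2, 3, 4};

    final byte[] decoded = pool.run(input -> {
        input.setBuffer(payload);                    // point the pooled Input at our bytes
        return input.readBytes(payload.length);
    }, payload.length);                              // small request -> this Input is recycled afterwards

    // A request larger than KryoInputPool.MAX_POOLED_BUFFER_SIZE still works, but that Input is not
    // returned to the pool, which is what discardOutput() asserts.
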
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java
new file mode 100644 (file)
index 0000000..04d55d6
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class KryoOutputPoolTest {
+
+  private KryoOutputPool kryoOutputPool;
+
+  @Before
+  public void setUp() throws Exception {
+    kryoOutputPool = new KryoOutputPool();
+  }
+
+  @Test
+  public void discardOutput() {
+    final Output[] result = new Output[2];
+    kryoOutputPool.run(output -> {
+      result[0] = output;
+      return null;
+    }, KryoOutputPool.MAX_POOLED_BUFFER_SIZE + 1);
+    kryoOutputPool.run(output -> {
+      result[1] = output;
+      return null;
+    }, 0);
+    assertTrue(result[0] != result[1]);
+  }
+
+  @Test
+  public void recycleOutput() {
+    final ByteArrayOutput[] result = new ByteArrayOutput[2];
+    kryoOutputPool.run(output -> {
+      output.writeInt(1);
+      assertEquals(Integer.BYTES, output.position());
+      result[0] = output;
+      return null;
+    }, 0);
+    assertEquals(0, result[0].position());
+    assertEquals(0, result[0].getByteArrayOutputStream().size());
+    kryoOutputPool.run(output -> {
+      assertEquals(0, output.position());
+      result[1] = output;
+      return null;
+    }, 0);
+    assertTrue(result[0] == result[1]);
+  }
+}
diff --git a/atomix-storage/src/test/resources/logback.xml b/atomix-storage/src/test/resources/logback.xml
new file mode 100644 (file)
index 0000000..41f8f99
--- /dev/null
@@ -0,0 +1,29 @@
+<!--
+  ~ Copyright 2017-present Open Networking Laboratory
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~ http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
+            </pattern>
+        </encoder>
+    </appender>
+
+    <logger name="io.atomix.storage" level="INFO" />
+
+    <root level="${root.logging.level:-INFO}">
+        <appender-ref ref="STDOUT" />
+    </root>
+</configuration>
\ No newline at end of file
index 3ca170074a1b480b69b2abb827599ac4019cbc7a..52fde3264014c0a8ffc7c9a737aa02ebd969f1d3 100644 (file)
@@ -11,7 +11,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
index 769ef1d0576aa64e0adb3deba280658d4a00a1b4..1595fb8111407437a739247dbb7cb264f12ab0d9 100644 (file)
@@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
@@ -20,6 +20,11 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>benchmark-api</artifactId>
@@ -46,8 +51,8 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>javax.annotation</groupId>
-      <artifactId>javax.annotation-api</artifactId>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
       <optional>true</optional>
     </dependency>
   </dependencies>
index e6c9548d5f9bcc72bdcaa91519eb5649eb5ca572..444ec2fc752040c357190b688ea71463a438e14e 100644 (file)
@@ -28,10 +28,10 @@ public final class BaListBuilder {
         List<OuterList> outerList = new ArrayList<>(outerElements);
         for (int j = 0; j < outerElements; j++) {
             outerList.add(new OuterListBuilder()
-                                .setId(j)
-                                .setInnerList(buildInnerList(j, innerElements))
-                                .withKey(new OuterListKey(j))
-                                .build());
+                .setId(j)
+                .setInnerList(buildInnerList(j, innerElements))
+                .withKey(new OuterListKey(j))
+                .build());
         }
         return outerList;
     }
@@ -39,14 +39,14 @@ public final class BaListBuilder {
     private static Map<InnerListKey, InnerList> buildInnerList(final int index, final int elements) {
         Builder<InnerListKey, InnerList> innerList = ImmutableMap.builderWithExpectedSize(elements);
 
-        final String itemStr = "Item-" + String.valueOf(index) + "-";
+        final String itemStr = "Item-" + index + "-";
         for (int i = 0; i < elements; i++) {
             final InnerListKey key = new InnerListKey(i);
             innerList.put(key, new InnerListBuilder()
-                                .withKey(key)
-                                .setName(i)
-                                .setValue(itemStr + String.valueOf(i))
-                                .build());
+                .withKey(key)
+                .setName(i)
+                .setValue(itemStr + i)
+                .build());
         }
         return innerList.build();
     }
index 50eb0d4e8bbafab7c1723a6ba5a6234a862ef395..bddf80e8803e008ac028644dfd17418629f09c87 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.dsbenchmark;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Random;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
@@ -27,6 +28,7 @@ public abstract class DatastoreAbstractWriter {
     protected int txOk = 0;
     protected int txError = 0;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "'this' passed to logging")
     public DatastoreAbstractWriter(final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         this.outerListElem = outerListElem;
@@ -50,18 +52,7 @@ public abstract class DatastoreAbstractWriter {
     }
 
     protected LogicalDatastoreType getDataStoreType() {
-        final LogicalDatastoreType dsType;
-        if (dataStore == DataStore.CONFIG) {
-            dsType = LogicalDatastoreType.CONFIGURATION;
-        } else if (dataStore == DataStore.OPERATIONAL) {
-            dsType = LogicalDatastoreType.OPERATIONAL;
-        } else {
-            if (rn.nextBoolean() == true) {
-                dsType = LogicalDatastoreType.OPERATIONAL;
-            } else {
-                dsType = LogicalDatastoreType.CONFIGURATION;
-            }
-        }
-        return dsType;
+        return dataStore == DataStore.CONFIG || dataStore != DataStore.OPERATIONAL && !rn.nextBoolean()
+            ? LogicalDatastoreType.CONFIGURATION : LogicalDatastoreType.OPERATIONAL;
     }
 }
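
The collapsed return above preserves the removed branching only because && binds tighter than ||: the
condition reads as CONFIG || (dataStore != OPERATIONAL && !rn.nextBoolean()). Spelled out against the old
if/else chain (illustrative only):

    // dataStore == CONFIG       -> CONFIGURATION   (left operand is true)
    // dataStore == OPERATIONAL  -> OPERATIONAL     (both operands are false)
    // any other value           -> rn.nextBoolean() ? OPERATIONAL : CONFIGURATION
    //                              (the same coin flip the old else branch performed)
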
index 68868643782482d37a8263a4b4294271f69ecb36..7bdc4d7768b9710d3a4a4d89922a93f598abd500 100644 (file)
@@ -13,12 +13,11 @@ import java.util.List;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.outer.list.InnerList;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public final class DomListBuilder {
     // Inner List Qname identifiers for yang model's 'name' and 'value'
@@ -33,27 +32,28 @@ public final class DomListBuilder {
     }
 
     public static List<MapEntryNode> buildOuterList(final int outerElements, final int innerElements) {
-        List<MapEntryNode> outerList = new ArrayList<>(outerElements);
+        final var outerList = new ArrayList<MapEntryNode>(outerElements);
         for (int j = 0; j < outerElements; j++) {
-            outerList.add(ImmutableNodes.mapEntryBuilder()
-                                .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j))
-                                .withChild(ImmutableNodes.leafNode(OL_ID, j))
-                                .withChild(buildInnerList(j, innerElements))
-                                .build());
+            outerList.add(ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j))
+                .withChild(ImmutableNodes.leafNode(OL_ID, j))
+                .withChild(buildInnerList(j, innerElements))
+                .build());
         }
         return outerList;
     }
 
     private static MapNode buildInnerList(final int index, final int elements) {
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> innerList = ImmutableNodes.mapNodeBuilder(InnerList.QNAME);
+        final var innerList = ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(InnerList.QNAME));
 
-        final String itemStr = "Item-" + String.valueOf(index) + "-";
+        final String itemStr = "Item-" + index + "-";
         for (int i = 0; i < elements; i++) {
-            innerList.addChild(ImmutableNodes.mapEntryBuilder()
-                                .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i))
-                                .withChild(ImmutableNodes.leafNode(IL_NAME, i))
-                                .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
-                                .build());
+            innerList.addChild(ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i))
+                .withChild(ImmutableNodes.leafNode(IL_NAME, i))
+                .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
+                .build());
         }
         return innerList.build();
     }
index 0bcc5811844d4c8fdecf2682d9b9441067903350..32f849596e16ca52f84959b89a92fdc704ec981b 100644 (file)
@@ -35,10 +35,11 @@ import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.DsbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutputBuilder;
@@ -63,17 +64,17 @@ import org.slf4j.LoggerFactory;
 @Singleton
 @Component(service = { })
 @RequireServiceComponentRuntime
-public final class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
+public final class DsbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkProvider.class);
-    private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
-            InstanceIdentifier.builder(TestExec.class).build();
-    private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID =
-            InstanceIdentifier.builder(TestStatus.class).build();
+    private static final InstanceIdentifier<TestExec> TEST_EXEC_IID = InstanceIdentifier.create(TestExec.class);
+    private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID = InstanceIdentifier.create(TestStatus.class);
 
     private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
     private final DsbenchmarkListenerProvider listenerProvider;
-    private final DOMDataBroker domDataBroker;  // Async DOM Broker for use with all DOM operations
-    private final DataBroker dataBroker; // Async Binding-Aware Broker for use in tx chains
+    // Async DOM Broker for use with all DOM operations
+    private final DOMDataBroker domDataBroker;
+    // Async Binding-Aware Broker for use in tx chains;
+    private final DataBroker dataBroker;
     private final Registration rpcReg;
 
     private long testsCompleted = 0;
@@ -95,7 +96,7 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea
             LOG.warn("Working around Bugs 8829 and 6793 by ignoring exception from setTestOperData", e);
         }
 
-        rpcReg = rpcService.registerRpcImplementation(DsbenchmarkService.class, this);
+        rpcReg = rpcService.registerRpcImplementations((StartTest) this::startTest, (CleanupStore) this::cleanupStore);
         LOG.info("DsbenchmarkProvider initiated");
     }
 
@@ -107,24 +108,22 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea
         LOG.info("DsbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
+    private ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
         cleanupTestStore();
         LOG.debug("Data Store cleaned up");
         return Futures.immediateFuture(RpcResultBuilder.success(new CleanupStoreOutputBuilder().build()).build());
     }
 
-    @Override
     @SuppressWarnings("checkstyle:illegalCatch")
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         LOG.info("Starting the data store benchmark test, input: {}", input);
 
         // Check if there is a test in progress
-        if (execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing) == false) {
+        if (!execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing)) {
             LOG.info("Test in progress");
             return RpcResultBuilder.success(new StartTestOutputBuilder()
-                    .setStatus(StartTestOutput.Status.TESTINPROGRESS)
-                    .build()).buildFuture();
+                .setStatus(StartTestOutput.Status.TESTINPROGRESS)
+                .build()).buildFuture();
         }
 
         // Cleanup data that may be left over from a previous test run
@@ -153,11 +152,11 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea
             testsCompleted++;
 
         } catch (final Exception e) {
-            LOG.error("Test error: {}", e.toString());
+            LOG.error("Test error", e);
             execStatus.set(ExecStatus.Idle);
             return RpcResultBuilder.success(new StartTestOutputBuilder()
-                    .setStatus(StartTestOutput.Status.FAILED)
-                    .build()).buildFuture();
+                .setStatus(StartTestOutput.Status.FAILED)
+                .build()).buildFuture();
         }
 
         LOG.info("Test finished");
@@ -249,43 +248,37 @@ public final class DsbenchmarkProvider implements DsbenchmarkService, AutoClosea
                         retVal = new SimpletxBaWrite(dataBroker, oper, outerListElem,
                                 innerListElem, writesPerTx, dataStore);
                     }
+                } else if (StartTestInput.Operation.DELETE == oper) {
+                    retVal = new SimpletxDomDelete(domDataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
+                } else if (StartTestInput.Operation.READ == oper) {
+                    retVal = new SimpletxDomRead(domDataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 } else {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new SimpletxDomDelete(domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new SimpletxDomRead(domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else {
-                        retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    }
+                    retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 }
-            } else {
-                if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new TxchainBaDelete(dataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new TxchainBaRead(dataBroker, outerListElem,
-                                innerListElem,writesPerTx, dataStore);
-                    } else {
-                        retVal = new TxchainBaWrite(dataBroker, oper, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    }
+            } else if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
+                if (StartTestInput.Operation.DELETE == oper) {
+                    retVal = new TxchainBaDelete(dataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
+                } else if (StartTestInput.Operation.READ == oper) {
+                    retVal = new TxchainBaRead(dataBroker, outerListElem,
+                            innerListElem,writesPerTx, dataStore);
                 } else {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new TxchainDomDelete(domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new TxchainDomRead(domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-
-                    } else {
-                        retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem,
-                                innerListElem,writesPerTx, dataStore);
-                    }
+                    retVal = new TxchainBaWrite(dataBroker, oper, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 }
+            } else if (StartTestInput.Operation.DELETE == oper) {
+                retVal = new TxchainDomDelete(domDataBroker, outerListElem,
+                        innerListElem, writesPerTx, dataStore);
+            } else if (StartTestInput.Operation.READ == oper) {
+                retVal = new TxchainDomRead(domDataBroker, outerListElem,
+                        innerListElem, writesPerTx, dataStore);
+
+            } else {
+                retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem,
+                        innerListElem,writesPerTx, dataStore);
             }
         } finally {
             execStatus.set(ExecStatus.Idle);
index 75523eff7a7f61dc0bb093bde2563596ccd3ec5f..34c2bfdb68c3984dde2acc600946b594dc8b0f20 100644 (file)
@@ -7,15 +7,11 @@
  */
 package org.opendaylight.dsbenchmark.listener;
 
-import java.util.Collection;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
 import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -25,8 +21,7 @@ public class DsbenchmarkListener implements DataTreeChangeListener<TestExec> {
     private final AtomicInteger numDataChanges = new AtomicInteger(0);
 
     @Override
-    public void onDataTreeChanged(
-            final Collection<DataTreeModification<TestExec>> changes) {
+    public void onDataTreeChanged(final List<DataTreeModification<TestExec>> changes) {
         // Since we're registering the same DsbenchmarkListener object for both
         // OPERATIONAL and CONFIG, the onDataTreeChanged() method can be called
         // from different threads, and we need to use atomic counters.
@@ -40,20 +35,19 @@ public class DsbenchmarkListener implements DataTreeChangeListener<TestExec> {
     }
 
     private static synchronized void logDataTreeChangeEvent(final int eventNum,
-            final Collection<DataTreeModification<TestExec>> changes) {
+            final List<DataTreeModification<TestExec>> changes) {
         LOG.debug("DsbenchmarkListener-onDataTreeChanged: Event {}", eventNum);
 
-        for (DataTreeModification<TestExec> change : changes) {
-            final DataObjectModification<TestExec> rootNode = change.getRootNode();
-            final ModificationType modType = rootNode.getModificationType();
-            final PathArgument changeId = rootNode.getIdentifier();
-            final Collection<? extends DataObjectModification<? extends DataObject>> modifications =
-                    rootNode.getModifiedChildren();
+        for (var change : changes) {
+            final var rootNode = change.getRootNode();
+            final var modType = rootNode.modificationType();
+            final var changeId = rootNode.step();
+            final var modifications = rootNode.modifiedChildren();
 
             LOG.debug("    changeId {}, modType {}, mods: {}", changeId, modType, modifications.size());
 
-            for (DataObjectModification<? extends DataObject> mod : modifications) {
-                LOG.debug("      mod-getDataAfter: {}", mod.getDataAfter());
+            for (var mod : modifications) {
+                LOG.debug("      mod-getDataAfter: {}", mod.dataAfter());
             }
         }
     }
index 6bc931ca118526b488eb988e6a347ffa34f3520a..20e271ad9ac58779b0c4031692d87dece85f5a42 100644 (file)
@@ -15,7 +15,7 @@ import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -24,7 +24,8 @@ public class DsbenchmarkListenerProvider {
     private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkListenerProvider.class);
     private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
             InstanceIdentifier.builder(TestExec.class).build();
-    private final List<ListenerRegistration<DsbenchmarkListener>> listeners = new ArrayList<>();
+    private final List<DsbenchmarkListener> listeners = new ArrayList<>();
+    private final List<Registration> registrations = new ArrayList<>();
     private final DataBroker dataBroker;
 
     public DsbenchmarkListenerProvider(final DataBroker dataBroker) {
@@ -34,11 +35,12 @@ public class DsbenchmarkListenerProvider {
 
     public void createAndRegisterListeners(final int numListeners) {
         for (int i = 0; i < numListeners; i++) {
-            DsbenchmarkListener listener = new DsbenchmarkListener();
-            listeners.add(dataBroker.registerDataTreeChangeListener(
-                    DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
-            listeners.add(dataBroker.registerDataTreeChangeListener(
-                    DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
+            var listener = new DsbenchmarkListener();
+            listeners.add(listener);
+            registrations.add(dataBroker.registerTreeChangeListener(
+                    DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
+            registrations.add(dataBroker.registerTreeChangeListener(
+                    DataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
 
         }
         LOG.debug("DsbenchmarkListenerProvider created {} listeneres", numListeners);
@@ -47,8 +49,8 @@ public class DsbenchmarkListenerProvider {
     public long getDataChangeCount() {
         long dataChanges = 0;
 
-        for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
-            dataChanges += listenerRegistration.getInstance().getNumDataChanges();
+        for (var listener : listeners) {
+            dataChanges += listener.getNumDataChanges();
         }
         LOG.debug("DsbenchmarkListenerProvider , total data changes {}", dataChanges);
         return dataChanges;
@@ -57,11 +59,14 @@ public class DsbenchmarkListenerProvider {
     public long getEventCountAndDestroyListeners() {
         long totalEvents = 0;
 
-        for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
-            totalEvents += listenerRegistration.getInstance().getNumEvents();
-            listenerRegistration.close();
+        registrations.forEach(Registration::close);
+        registrations.clear();
+
+        for (var listener : listeners) {
+            totalEvents += listener.getNumEvents();
         }
         listeners.clear();
+
         LOG.debug("DsbenchmarkListenerProvider destroyed listeneres, total events {}", totalEvents);
         return totalEvents;
     }
index aae96750a9ee80a94d4582c07d412a527e8e5eb2..1f6b7f988421214299805af03b40f51322771a0c 100644 (file)
@@ -64,7 +64,7 @@ public class SimpletxBaRead extends DatastoreAbstractWriter {
                 try {
                     optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        OuterList outerList = optionalDataObject.get();
+                        OuterList outerList = optionalDataObject.orElseThrow();
 
                         String[] objectsArray = new String[outerList.getInnerList().size()];
 
@@ -77,7 +77,7 @@ public class SimpletxBaRead extends DatastoreAbstractWriter {
                         }
                         for (int i = 0; i < outerList.getInnerList().size(); i++) {
                             String itemStr = objectsArray[i];
-                            if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+                            if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
                                 LOG.error("innerList: name: {}, value: {}", i, itemStr);
                                 break;
                             }
index 8893a70574a4ab97e8245d3474edb5c38b7999b7..5395868aeca487f7e0ba6fbc89cc72b1837d0676 100644 (file)
@@ -5,16 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.dsbenchmark.simpletx;
 
+import static java.util.Objects.requireNonNull;
+
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.BaListBuilder;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -25,31 +24,31 @@ import org.slf4j.LoggerFactory;
 
 public class SimpletxBaWrite extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(SimpletxBaWrite.class);
+
     private final DataBroker dataBroker;
-    private List<OuterList> list;
+    private List<OuterList> list = null;
 
     public SimpletxBaWrite(final DataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.dataBroker = dataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created SimpletxBaWrite");
     }
 
     @Override
     public void createList() {
-        list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
+        final var dsType = getDataStoreType();
 
-        WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+        var tx = dataBroker.newWriteOnlyTransaction();
         long writeCnt = 0;
 
-        for (OuterList element : this.list) {
-            InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
-                                                    .child(OuterList.class, element.key());
+        for (var element : list) {
+            final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, iid, element);
             } else {
@@ -80,5 +79,4 @@ public class SimpletxBaWrite extends DatastoreAbstractWriter {
             }
         }
     }
-
 }
index 4e9a59a7441cea886bc99eb15d3ac45bed946765..d46a769b09a1da7435085b50d83b796fbab58fb0 100644 (file)
@@ -66,7 +66,7 @@ public class SimpletxDomRead extends DatastoreAbstractWriter {
                 try {
                     Optional<NormalizedNode> optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        NormalizedNode ret = optionalDataObject.get();
+                        NormalizedNode ret = optionalDataObject.orElseThrow();
                         LOG.trace("optionalDataObject is {}", ret);
                         txOk++;
                     } else {
index f4cdbef26d1fcc5c281da621db4ab0615d225acb..a57773125f9e707dca4ec3b30394d9896b490e04 100644 (file)
@@ -5,16 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.dsbenchmark.simpletx;
 
+import static java.util.Objects.requireNonNull;
+
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.dsbenchmark.DomListBuilder;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -27,33 +26,32 @@ import org.slf4j.LoggerFactory;
 
 public class SimpletxDomWrite extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(SimpletxDomWrite.class);
-    private final DOMDataBroker domDataBroker;
-    private List<MapEntryNode> list;
 
-    public SimpletxDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+    private final DOMDataBroker dataBroker;
+    private List<MapEntryNode> list = null;
+
+    public SimpletxDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long putsPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, putsPerTx, dataStore);
-        this.domDataBroker = domDataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created SimpletxDomWrite");
     }
 
     @Override
     public void createList() {
-        list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
-        final YangInstanceIdentifier pid =
-                YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
+        final var dsType = getDataStoreType();
+        final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
 
-        DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
+        var tx = dataBroker.newWriteOnlyTransaction();
         long writeCnt = 0;
 
-        for (MapEntryNode element : this.list) {
-            YangInstanceIdentifier yid =
-                    pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap()));
+        for (var element : list) {
+            final var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, yid, element);
@@ -71,7 +69,7 @@ public class SimpletxDomWrite extends DatastoreAbstractWriter {
                     LOG.error("Transaction failed", e);
                     txError++;
                 }
-                tx = domDataBroker.newWriteOnlyTransaction();
+                tx = dataBroker.newWriteOnlyTransaction();
                 writeCnt = 0;
             }
         }
index bd947acddb13ea595757e72557dac0871f3a1de0..41b233774a4c8fbf921bcdd5fed29e9bf1b73494 100644 (file)
@@ -12,9 +12,7 @@ import com.google.common.util.concurrent.MoreExecutors;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
 import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
@@ -24,10 +22,11 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchm
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListKey;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaDelete extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaDelete.class);
     private final DataBroker bindingDataBroker;
 
@@ -53,7 +52,8 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact
     @Override
     public void executeList() {
         final LogicalDatastoreType dsType = getDataStoreType();
-        final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this);
+        final TransactionChain chain = bindingDataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
         WriteTransaction tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
@@ -104,13 +104,12 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact
     }
 
     @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainBaDelete", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
+    public void onSuccess(final Empty chain) {
+        LOG.debug("TxchainBaDelete closed successfully");
     }
 }
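
Across these ``dsbenchmark`` writers the removed ``TransactionChainListener``/``DOMTransactionChainListener`` callbacks are replaced by registering the writer itself as a Guava ``FutureCallback<Empty>`` on the chain. A minimal sketch of the new chain lifecycle, using only the broker and chain calls visible in the hunks (class and method names here are illustrative):

.. code:: java

    import com.google.common.util.concurrent.FutureCallback;
    import org.opendaylight.mdsal.binding.api.DataBroker;
    import org.opendaylight.mdsal.binding.api.TransactionChain;
    import org.opendaylight.yangtools.yang.common.Empty;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class ChainCallbackSketch implements FutureCallback<Empty> {
        private static final Logger LOG = LoggerFactory.getLogger(ChainCallbackSketch.class);

        TransactionChain openChain(final DataBroker dataBroker) {
            // the chain factory no longer takes a listener argument ...
            final TransactionChain chain = dataBroker.createMergingTransactionChain();
            // ... the outcome is observed through a FutureCallback<Empty> instead
            chain.addCallback(this);
            return chain;
        }

        @Override
        public void onSuccess(final Empty result) {
            LOG.debug("Chain closed successfully");
        }

        @Override
        public void onFailure(final Throwable cause) {
            LOG.error("Broken chain", cause);
        }
    }

The same shape recurs in the DOM-based writers below, with ``DOMDataBroker`` and ``DOMTransactionChain`` in place of the binding types.
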
index 414ad159af6700d8eb1cbc52ec03e50b1908b20b..a45599566643f8ba05a5ca9f9416338c75c70d6c 100644 (file)
@@ -13,9 +13,6 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
@@ -27,7 +24,7 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaRead extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaRead extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaRead.class);
     private final DataBroker bindingDataBroker;
 
@@ -64,19 +61,19 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio
                 try {
                     Optional<OuterList> optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        OuterList outerList = optionalDataObject.get();
+                        OuterList outerList = optionalDataObject.orElseThrow();
 
-                        String[] objectsArray = new String[outerList.getInnerList().size()];
-                        for (InnerList innerList : outerList.getInnerList().values()) {
+                        String[] objectsArray = new String[outerList.nonnullInnerList().size()];
+                        for (InnerList innerList : outerList.nonnullInnerList().values()) {
                             if (objectsArray[innerList.getName()] != null) {
                                 LOG.error("innerList: DUPLICATE name: {}, value: {}", innerList.getName(),
                                     innerList.getValue());
                             }
                             objectsArray[innerList.getName()] = innerList.getValue();
                         }
-                        for (int i = 0; i < outerList.getInnerList().size(); i++) {
+                        for (int i = 0; i < outerList.nonnullInnerList().size(); i++) {
                             String itemStr = objectsArray[i];
-                            if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+                            if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
                                 LOG.error("innerList: name: {}, value: {}", i, itemStr);
                                 break;
                             }
@@ -92,15 +89,4 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio
             }
         }
     }
-
-    @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainBaDelete, transaction {}", chain, transaction.getIdentifier(), cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
-    }
 }
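
The read side now calls ``orElseThrow()`` on the returned ``Optional`` and iterates ``nonnullInnerList()``, which yields an empty map rather than null when the list is absent. A small sketch of that access pattern, with the verification logic reduced to an illustrative check:

.. code:: java

    import java.util.Optional;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;

    final class NonnullAccessSketch {
        private NonnullAccessSketch() {
            // static helpers only
        }

        static int countNamedEntries(final Optional<OuterList> maybeOuter) {
            if (maybeOuter.isEmpty()) {
                return 0;
            }
            // orElseThrow() replaces Optional.get(); nonnullInnerList() never returns null
            final OuterList outer = maybeOuter.orElseThrow();
            int count = 0;
            for (var inner : outer.nonnullInnerList().values()) {
                if (inner.getValue() != null) {
                    count++;
                }
            }
            return count;
        }
    }
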
index b96c1763b7a423ae54c8d4e8832ae77b08a54d4f..3640f607dbf9145997963b253f624298c11c8609 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.dsbenchmark.txchain;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.List;
@@ -14,50 +16,47 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.BaListBuilder;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.Operation;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaWrite extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaWrite.class);
-    private final DataBroker bindingDataBroker;
-    private List<OuterList> list;
 
-    public TxchainBaWrite(final DataBroker bindingDataBroker, final Operation oper,
-            final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
+    private final DataBroker dataBroker;
+    private List<OuterList> list = null;
+
+    public TxchainBaWrite(final DataBroker dataBroker, final Operation oper, final int outerListElem,
+            final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.bindingDataBroker = bindingDataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created TxchainBaWrite");
     }
 
     @Override
     public void createList() {
-        list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final TransactionChain chain = bindingDataBroker.createMergingTransactionChain(this);
-        final LogicalDatastoreType dsType = getDataStoreType();
+        final var chain = dataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
+        final var dsType = getDataStoreType();
 
-        WriteTransaction tx = chain.newWriteOnlyTransaction();
+        var tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
         int writeCnt = 0;
 
-        for (OuterList element : this.list) {
-            InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
-                                                    .child(OuterList.class, element.key());
+        for (var element : list) {
+            final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, iid, element);
@@ -106,14 +105,12 @@ public class TxchainBaWrite extends DatastoreAbstractWriter implements Transacti
     }
 
     @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in DatastoreBaAbstractWrite, transaction {}", chain, transaction.getIdentifier(),
-            cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in DatastoreBaAbstractWrite", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("DatastoreBaAbstractWrite closed successfully, chain {}", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("DatastoreBaAbstractWrite closed successfully");
     }
 }
index 75dee6267346960e00ddc8480b8f243043fbf5be..c0280c6b02b3642b548a65142e66de9b0b224352 100644 (file)
@@ -14,21 +14,20 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainDomDelete.class);
     private final DOMDataBroker domDataBroker;
 
@@ -57,7 +56,8 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran
         final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
         final YangInstanceIdentifier pid =
                 YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
-        final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this);
+        final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
         DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
@@ -108,13 +108,12 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran
     }
 
     @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainDomDelete", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("TxchainDomDelete closed successfully");
     }
 }
index 5c70edcd6c541425315034386362af11af66ee64..cccdb4ecdb66a5bcaac31ac5c708877ac130db39 100644 (file)
@@ -14,9 +14,6 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -28,7 +25,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomRead extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainDomRead.class);
     private final DOMDataBroker domDataBroker;
 
@@ -54,7 +51,7 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa
     @Override
     public void executeList() {
         final LogicalDatastoreType dsType = getDataStoreType();
-        final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
+        final QName olId = QName.create(OuterList.QNAME, "id");
         final YangInstanceIdentifier pid =
                 YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
 
@@ -75,15 +72,4 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa
             }
         }
     }
-
-    @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomDelete, transaction {}", chain, transaction.getIdentifier(), cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
-    }
 }
index 4f254a2ce99cfba935fcf4190d9ba18008a109f4..b50a7e0ad13c609e220b443ea381299ebeeff73d 100644 (file)
@@ -14,53 +14,49 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.dsbenchmark.DomListBuilder;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainDomWrite.class);
-    private final DOMDataBroker domDataBroker;
-    private List<MapEntryNode> list;
 
-    public TxchainDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+    private final DOMDataBroker dataBroker;
+    private List<MapEntryNode> list = null;
+
+    public TxchainDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.domDataBroker = domDataBroker;
+        this.dataBroker = dataBroker;
         LOG.debug("Created TxchainDomWrite");
     }
 
     @Override
     public void createList() {
-        list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
-        final YangInstanceIdentifier pid =
-                YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
-        final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain(this);
+        final var dsType = getDataStoreType();
+        final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
+        final var chain = dataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
-        DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
+        var tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
         int writeCnt = 0;
 
-        for (MapEntryNode element : this.list) {
-            YangInstanceIdentifier yid =
-                    pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.getIdentifier().asMap()));
+        for (var element : list) {
+            var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, yid, element);
@@ -112,13 +108,12 @@ public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTrans
     }
 
     @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomWrite, transaction {}", chain, transaction.getIdentifier(), cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainDomWrite", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("Chain {} closed successfully", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("Chain closed successfully");
     }
 }
index 519ba4efbf1b78dfb12996983775af9b0d2a5091..82b3c3aabf3ead4c528807bab530e0b04f8feb6a 100644 (file)
@@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
@@ -38,8 +38,8 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>javax.annotation</groupId>
-      <artifactId>javax.annotation-api</artifactId>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
       <optional>true</optional>
     </dependency>
   </dependencies>
index 021369acb0fe0178ee87853f569f313021217759..ee77a6c342ed841258ce086d3481255590aa06d1 100644 (file)
@@ -8,8 +8,11 @@
 package ntfbenchmark.impl;
 
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer {
+    private static final Logger LOG = LoggerFactory.getLogger(NtfbenchBlockingProducer.class);
 
     public NtfbenchBlockingProducer(final NotificationPublishService publishService, final int iterations,
             final int payloadSize) {
@@ -22,12 +25,13 @@ public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer {
         int ntfOk = 0;
         int ntfError = 0;
 
-        for (int i = 0; i < this.iterations; i++) {
+        for (int i = 0; i < iterations; i++) {
             try {
-                this.publishService.putNotification(this.ntf);
+                publishService.putNotification(ntf);
                 ntfOk++;
             } catch (final Exception e) {
                 ntfError++;
+                LOG.debug("Failed to push notification", e);
             }
         }
 
index b0e239c4d45f99f483c7abbd1ae681d98498f723..4b44e1d73dbf2f25bffbd3b4de47f83c03e38f10 100644 (file)
@@ -28,9 +28,9 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
         int ntfOk = 0;
         int ntfError = 0;
         ListenableFuture<?> lastOkFuture = null;
-        for (int i = 0; i < this.iterations; i++) {
+        for (int i = 0; i < iterations; i++) {
             try {
-                final ListenableFuture<?> result = this.publishService.offerNotification(this.ntf);
+                final ListenableFuture<?> result = publishService.offerNotification(ntf);
                 if (NotificationPublishService.REJECTED == result) {
                     ntfError++;
                 } else {
@@ -50,9 +50,8 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
             try {
                 lastOkFuture.get();
             } catch (InterruptedException | ExecutionException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
     }
-
 }
index d126b2774ae35bdb240a7ce850f3ff51f707b3c6..b57d37b99b0254395c54ef6ea648a8a0aef31273 100644 (file)
@@ -9,11 +9,10 @@ package ntfbenchmark.impl;
 
 import com.google.common.util.concurrent.Futures;
 import java.util.concurrent.Future;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.NtfbenchPayloadListener;
-
-public class NtfbenchTestListener implements NtfbenchPayloadListener {
 
+public class NtfbenchTestListener implements Listener<Ntfbench> {
     private final int expectedSize;
     private int received = 0;
 
@@ -22,7 +21,7 @@ public class NtfbenchTestListener implements NtfbenchPayloadListener {
     }
 
     @Override
-    public void onNtfbench(final Ntfbench notification) {
+    public void onNotification(final Ntfbench notification) {
         if (expectedSize == notification.getPayload().size()) {
             received++;
         }
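
The generated ``NtfbenchPayloadListener`` interface is gone; listeners now implement the generic ``NotificationService.Listener<N>`` for a single notification type and are registered per type. A minimal sketch of the listener plus its registration, mirroring the ``registerListener(Ntfbench.class, listener)`` call made by the provider further below (the class name is illustrative):

.. code:: java

    import org.opendaylight.mdsal.binding.api.NotificationService;
    import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
    import org.opendaylight.yangtools.concepts.Registration;

    final class ListenerSketch implements Listener<Ntfbench> {
        private int received;

        @Override
        public void onNotification(final Ntfbench notification) {
            // one typed callback replaces the generated onNtfbench() method
            received++;
        }

        Registration register(final NotificationService notificationService) {
            // closing the returned Registration stops delivery
            return notificationService.registerListener(Ntfbench.class, this);
        }
    }
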
index 1b16329d524b7210bec5a948599c0d8f95b1389b..8cce7448bcd0d7272cfcca06cafb5c052ac82c7f 100644 (file)
@@ -20,9 +20,8 @@ public class NtfbenchWTCListener extends NtfbenchTestListener {
     }
 
     @Override
-    public void onNtfbench(final Ntfbench notification) {
-        // TODO Auto-generated method stub
-        super.onNtfbench(notification);
+    public void onNotification(final Ntfbench notification) {
+        super.onNotification(notification);
         if (expectedCount == getReceived()) {
             allDone.set(null);
         }
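
The wait-till-complete listener signals completion through a ``SettableFuture`` that the provider later blocks on via ``getAllDone().get()``. A hedged sketch of that handshake in isolation, with hypothetical names:

.. code:: java

    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.SettableFuture;

    final class CompletionSketch {
        private final SettableFuture<Void> allDone = SettableFuture.create();
        private final int expectedCount;
        private int received;

        CompletionSketch(final int expectedCount) {
            this.expectedCount = expectedCount;
        }

        void onOneNotification() {
            if (++received == expectedCount) {
                // completes the future exactly once, releasing any thread blocked on get()
                allDone.set(null);
            }
        }

        ListenableFuture<Void> getAllDone() {
            return allDone;
        }
    }
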
index acd5af61436a9be62f886e8e1fdfa6372c286c6a..7c8e51f6431afaee79079c65833d180a710093a2 100644 (file)
@@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -23,14 +22,15 @@ import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.NotificationService;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.NtfbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput.ProducerType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
@@ -46,7 +46,7 @@ import org.slf4j.LoggerFactory;
 @Singleton
 @Component(service = {})
 @RequireServiceComponentRuntime
-public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService {
+public final class NtfbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(NtfbenchmarkProvider.class);
     private static final int TEST_TIMEOUT = 5;
 
@@ -61,7 +61,7 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
             @Reference final RpcProviderService rpcService) {
         this.listenService = requireNonNull(listenService);
         this.publishService = requireNonNull(publishService);
-        reg = rpcService.registerRpcImplementation(NtfbenchmarkService.class, this);
+        reg = rpcService.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
         LOG.debug("NtfbenchmarkProvider initiated");
     }
 
@@ -73,20 +73,20 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
         LOG.info("NtfbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         final int producerCount = input.getProducers().intValue();
         final int listenerCount = input.getListeners().intValue();
         final int iterations = input.getIterations().intValue();
         final int payloadSize = input.getIterations().intValue();
 
-        final List<AbstractNtfbenchProducer> producers = new ArrayList<>(producerCount);
-        final List<ListenerRegistration<NtfbenchTestListener>> listeners = new ArrayList<>(listenerCount);
+        final var producers = new ArrayList<AbstractNtfbenchProducer>(producerCount);
         for (int i = 0; i < producerCount; i++) {
             producers.add(new NtfbenchBlockingProducer(publishService, iterations, payloadSize));
         }
         int expectedCntPerListener = producerCount * iterations;
 
+        final var listeners = new ArrayList<NtfbenchTestListener>(listenerCount);
+        final var registrations = new ArrayList<Registration>(listenerCount);
         for (int i = 0; i < listenerCount; i++) {
             final NtfbenchTestListener listener;
             if (input.getProducerType() == ProducerType.BLOCKING) {
@@ -94,7 +94,8 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
             } else {
                 listener = new NtfbenchTestListener(payloadSize);
             }
-            listeners.add(listenService.registerNotificationListener(listener));
+            listeners.add(listener);
+            registrations.add(listenService.registerListener(Ntfbench.class, listener));
         }
 
         try {
@@ -110,11 +111,11 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
             executor.shutdown();
             try {
                 executor.awaitTermination(TEST_TIMEOUT, TimeUnit.MINUTES);
-                for (ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                    listenerRegistration.getInstance().getAllDone().get();
+                for (var listener : listeners) {
+                    listener.getAllDone().get();
                 }
             } catch (final InterruptedException | ExecutionException e) {
-                LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT);
+                LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT, e);
             }
 
             final long producerEndTime = System.nanoTime();
@@ -124,8 +125,8 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
             long allProducersOk = 0;
             long allProducersError = 0;
 
-            for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                allListeners += listenerRegistration.getInstance().getReceived();
+            for (var listener : listeners) {
+                allListeners += listener.getReceived();
             }
 
             final long listenerElapsedTime = producerEndTime - startTime;
@@ -149,14 +150,11 @@ public final class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkSe
                 .build();
             return RpcResultBuilder.success(output).buildFuture();
         } finally {
-            for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                listenerRegistration.close();
-            }
+            registrations.forEach(Registration::close);
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+    private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
         throw new UnsupportedOperationException("Not implemented");
     }
 }
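
Instead of implementing the generated ``NtfbenchmarkService`` interface, the provider registers individual RPC implementations, casting method references to the generated per-RPC interfaces (``StartTest``, ``TestStatus``). A minimal sketch of that registration shape; the bodies are stubs standing in for the real benchmark logic:

.. code:: java

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.mdsal.binding.api.RpcProviderService;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatus;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusInput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusOutput;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.opendaylight.yangtools.yang.common.RpcResult;

    final class RpcRegistrationSketch implements AutoCloseable {
        private final Registration reg;

        RpcRegistrationSketch(final RpcProviderService rpcService) {
            // each method reference is cast to its generated single-RPC interface
            reg = rpcService.registerRpcImplementations(
                (StartTest) this::startTest,
                (TestStatus) this::testStatus);
        }

        private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
            throw new UnsupportedOperationException("stubbed out in this sketch");
        }

        private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
            throw new UnsupportedOperationException("stubbed out in this sketch");
        }

        @Override
        public void close() {
            reg.close();
        }
    }
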
index 5d182fd2b180b34ef35d697ce4db2fb76e7df146..cf915b7bc29116a9f8e91e6389fabc22cc6652a3 100644 (file)
@@ -10,13 +10,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>benchmark-aggregator</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
index ed020b7afa0b422e7ce2dc0963d1f4a483f0b37b..8eefdf66648207dbec42eb4f8940b94b2e70bf59 100644 (file)
@@ -8,39 +8,55 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
-    <relativePath>../../opendaylight/md-sal/parent</relativePath>
-  </parent>
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>mdsal-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../../opendaylight/md-sal/parent</relativePath>
+    </parent>
 
-  <artifactId>rpcbenchmark</artifactId>
-  <packaging>bundle</packaging>
+    <artifactId>rpcbenchmark</artifactId>
+    <packaging>bundle</packaging>
 
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>benchmark-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.service.component.annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.guicedee.services</groupId>
-      <artifactId>javax.inject</artifactId>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>javax.annotation</groupId>
-      <artifactId>javax.annotation-api</artifactId>
-      <optional>true</optional>
-    </dependency>
-  </dependencies>
+    <dependencies>
+        <dependency>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>benchmark-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>yang-binding</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>concepts</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+    </dependencies>
 </project>
index 33328f9664629e184957bf8d8d7e3d34b035717e..34aa71ca262385af241e063f410f83e43b36afae 100644 (file)
@@ -14,21 +14,18 @@ import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOut
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
-abstract class AbstractRpcbenchPayloadService implements RpcbenchPayloadService {
+abstract class AbstractRpcbenchPayloadService {
     private int numRpcs = 0;
 
-    @Override
-    public final ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
+    final ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
         numRpcs++;
         return RpcResultBuilder.success(new GlobalRpcBenchOutputBuilder(input).build()).buildFuture();
     }
 
-    @Override
-    public final ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
+    final ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
         numRpcs++;
         return RpcResultBuilder.success(new RoutedRpcBenchOutputBuilder(input).build()).buildFuture();
     }
index bbbf309d887f259281fb7085b2f499dd14abc58f..86e3a097bf08a32af53fc489086fffae9d145536 100644 (file)
@@ -9,26 +9,22 @@ package rpcbenchmark.impl;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
-import java.util.Map;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
-import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class GlobalBindingRTCClient implements RTCClient {
     private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCClient.class);
 
-    private final RpcbenchPayloadService service;
+    private final GlobalRpcBench globalRpcBench;
     private final AtomicLong rpcOk = new AtomicLong(0);
     private final AtomicLong rpcError = new AtomicLong(0);
     private final GlobalRpcBenchInput inVal;
@@ -44,12 +40,8 @@ public class GlobalBindingRTCClient implements RTCClient {
         return rpcError.get();
     }
 
-    public GlobalBindingRTCClient(final RpcConsumerRegistry registry, final int inSize) {
-        if (registry != null) {
-            this.service = registry.getRpcService(RpcbenchPayloadService.class);
-        } else {
-            this.service = null;
-        }
+    public GlobalBindingRTCClient(final RpcService rpcService, final int inSize) {
+        globalRpcBench = rpcService.getRpc(GlobalRpcBench.class);
 
         this.inSize = inSize;
         Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
@@ -66,12 +58,12 @@ public class GlobalBindingRTCClient implements RTCClient {
         int error = 0;
 
         for (int i = 0; i < iterations; i++) {
-            Future<RpcResult<GlobalRpcBenchOutput>> output = service.globalRpcBench(inVal);
+            final var output = globalRpcBench.invoke(inVal);
             try {
-                RpcResult<GlobalRpcBenchOutput> rpcResult = output.get();
+                final var rpcResult = output.get();
 
                 if (rpcResult.isSuccessful()) {
-                    Map<PayloadKey, Payload> retVal = rpcResult.getResult().getPayload();
+                    final var retVal = rpcResult.getResult().getPayload();
                     if (retVal.size() == inSize) {
                         ok++;
                     }
@@ -94,5 +86,4 @@ public class GlobalBindingRTCClient implements RTCClient {
         // TODO Auto-generated method stub
 
     }
-
 }
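
On the consumer side, ``RpcConsumerRegistry.getRpcService(RpcbenchPayloadService.class)`` becomes ``RpcService.getRpc(GlobalRpcBench.class)``, and each RPC is a single ``invoke()`` call returning a listenable future of ``RpcResult``. A minimal sketch of one synchronous invocation, reusing the generated types from the hunk above:

.. code:: java

    import java.util.concurrent.ExecutionException;
    import org.opendaylight.mdsal.binding.api.RpcService;
    import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
    import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
    import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInputBuilder;

    final class RpcInvokeSketch {
        private RpcInvokeSketch() {
            // static helpers only
        }

        static boolean invokeOnce(final RpcService rpcService) throws InterruptedException, ExecutionException {
            // one typed handle per RPC replaces the whole generated *Service interface
            final GlobalRpcBench globalRpcBench = rpcService.getRpc(GlobalRpcBench.class);
            final GlobalRpcBenchInput input = new GlobalRpcBenchInputBuilder().build();
            // invoke() yields a ListenableFuture<RpcResult<GlobalRpcBenchOutput>>
            return globalRpcBench.invoke(input).get().isSuccessful();
        }
    }
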
index 67fde76865771b5d5135d0d2bf800092afeaa54f..78d8e4a1ba49456e11740d338278ce657a0f16e3 100644 (file)
@@ -8,7 +8,8 @@
 package rpcbenchmark.impl;
 
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
@@ -20,7 +21,9 @@ final class GlobalBindingRTCServer extends AbstractRpcbenchPayloadService implem
     private final Registration reg;
 
     GlobalBindingRTCServer(@Reference final RpcProviderService rpcProvider) {
-        reg = rpcProvider.registerRpcImplementation(RpcbenchPayloadService.class, this);
+        reg = rpcProvider.registerRpcImplementations(
+            (GlobalRpcBench) this::globalRpcBench,
+            (RoutedRpcBench) this::routedRpcBench);
         LOG.debug("GlobalBindingRTCServer started");
     }
 
index 1982bebcd852e90898408d100bf1f1900228c103..a6384606ed233173b727ecacb229b5e1599538da 100644 (file)
@@ -7,9 +7,11 @@
  */
 package rpcbenchmark.impl;
 
+import java.util.List;
 import java.util.Set;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
@@ -17,7 +19,9 @@ final class RoutedBindingRTCServer extends AbstractRpcbenchPayloadService implem
     private final Registration reg;
 
     RoutedBindingRTCServer(final RpcProviderService rpcProvider, final Set<InstanceIdentifier<?>> paths) {
-        reg = rpcProvider.registerRpcImplementation(RpcbenchPayloadService.class, this, paths);
+        reg = rpcProvider.registerRpcImplementations(List.of(
+            (GlobalRpcBench) this::globalRpcBench,
+            (RoutedRpcBench) this::routedRpcBench), paths);
     }
 
     @Override
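
For routed RPCs, the same ``registerRpcImplementations`` call takes a collection of implementations plus the set of ``InstanceIdentifier`` paths they are bound to. A hedged sketch of that variant with a single inline implementation (the lambda body simply echoes the input, as the benchmark service above does):

.. code:: java

    import java.util.List;
    import java.util.Set;
    import org.opendaylight.mdsal.binding.api.RpcProviderService;
    import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
    import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
    import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

    final class RoutedRegistrationSketch {
        private RoutedRegistrationSketch() {
            // static helpers only
        }

        static Registration register(final RpcProviderService rpcProvider, final Set<InstanceIdentifier<?>> paths) {
            // the List carries the implementations, the Set restricts them to the given context paths
            return rpcProvider.registerRpcImplementations(List.of(
                (RoutedRpcBench) input -> RpcResultBuilder
                    .success(new RoutedRpcBenchOutputBuilder(input).build())
                    .buildFuture()), paths);
        }
    }
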
index d2c3ae339db2f936503a5b64638f857d5b502d55..bd5e83e8bf511286f0ff7f56c6042f8b64c0ce98 100644 (file)
@@ -16,11 +16,11 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInputBuilder;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
@@ -31,15 +31,15 @@ import org.slf4j.LoggerFactory;
 
 public class RoutedBindingRTClient implements RTCClient {
     private static final Logger LOG = LoggerFactory.getLogger(RoutedBindingRTClient.class);
-    private final RpcbenchPayloadService service;
+    private final RoutedRpcBench routedRpcBench;
     private final AtomicLong rpcOk = new AtomicLong(0);
     private final AtomicLong rpcError = new AtomicLong(0);
     private final List<RoutedRpcBenchInput> inVal = new ArrayList<>();
     private final int inSize;
 
-    public RoutedBindingRTClient(final RpcConsumerRegistry registry, final int inSize,
+    public RoutedBindingRTClient(final RpcService rpcService, final int inSize,
             final List<InstanceIdentifier<?>> routeIid) {
-        service = registry.getRpcService(RpcbenchPayloadService.class);
+        routedRpcBench = rpcService.getRpc(RoutedRpcBench.class);
         this.inSize = inSize;
 
         Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
@@ -72,7 +72,7 @@ public class RoutedBindingRTClient implements RTCClient {
         int rpcServerCnt = inVal.size();
         for (int i = 0; i < iterations; i++) {
             RoutedRpcBenchInput input = inVal.get(ThreadLocalRandom.current().nextInt(rpcServerCnt));
-            Future<RpcResult<RoutedRpcBenchOutput>> output = service.routedRpcBench(input);
+            Future<RpcResult<RoutedRpcBenchOutput>> output = routedRpcBench.invoke(input);
             try {
                 RpcResult<RoutedRpcBenchOutput> rpcResult = output.get();
 
index 306539ce661cbea2f8711836d3366d9de0f5b47e..c785e9b384134eb023833364961ecc68566e1eca 100644 (file)
@@ -21,15 +21,16 @@ import java.util.concurrent.atomic.AtomicReference;
 import javax.annotation.PreDestroy;
 import javax.inject.Inject;
 import javax.inject.Singleton;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchRpcRoutes;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRoute;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRouteKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.RpcbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput.ExecStatus;
@@ -50,24 +51,24 @@ import org.slf4j.LoggerFactory;
 @Singleton
 @Component(service = {})
 @RequireServiceComponentRuntime
-public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService {
+public final class RpcbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(RpcbenchmarkProvider.class);
     private static final int TEST_TIMEOUT = 5;
 
     private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
     private final RpcProviderService providerRegistry;
-    private final RpcConsumerRegistry consumerRegistry;
+    private final RpcService consumerRegistry;
     private final GlobalBindingRTCServer globalServer;
     private final Registration reg;
 
     @Inject
     @Activate
     public RpcbenchmarkProvider(@Reference final RpcProviderService providerRegistry,
-            @Reference final RpcConsumerRegistry consumerRegistry) {
+            @Reference final RpcService consumerRegistry) {
         this.providerRegistry = requireNonNull(providerRegistry);
         this.consumerRegistry = requireNonNull(consumerRegistry);
         globalServer = new GlobalBindingRTCServer(providerRegistry);
-        reg = providerRegistry.registerRpcImplementation(RpcbenchmarkService.class, this);
+        reg = providerRegistry.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
         LOG.info("RpcbenchmarkProvider initiated");
     }
 
@@ -80,8 +81,7 @@ public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkSe
         LOG.info("RpcbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         LOG.debug("startTest {}", input);
 
         final RTCClient client;
@@ -149,8 +149,7 @@ public final class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkSe
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+    private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
         LOG.info("testStatus");
         TestStatusOutput output = new TestStatusOutputBuilder()
                                         .setGlobalServerCnt(Uint32.valueOf(globalServer.getNumRpcs()))
index 54ca4c8741f4353e901820fa1161719b88851a4e..5d373d3f372a68cab3eca96abc31676191139213 100644 (file)
     <modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.opendaylight.mdsal</groupId>
-        <artifactId>dom-parent</artifactId>
-        <version>8.0.10</version>
+        <artifactId>bundle-parent</artifactId>
+        <version>13.0.1</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <dependencyManagement>
@@ -25,7 +25,7 @@
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>controller-artifacts</artifactId>
-                <version>5.0.0-SNAPSHOT</version>
+                <version>9.0.3-SNAPSHOT</version>
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
             <dependency>
                 <groupId>org.scala-lang</groupId>
                 <artifactId>scala-library</artifactId>
-                <version>2.13.8</version>
+                <version>2.13.13</version>
             </dependency>
             <dependency>
                 <groupId>org.scala-lang</groupId>
                 <artifactId>scala-reflect</artifactId>
-                <version>2.13.8</version>
+                <version>2.13.13</version>
             </dependency>
             <dependency>
                 <groupId>org.scala-lang.modules</groupId>
@@ -54,7 +54,7 @@
             <dependency>
                 <groupId>org.scalatestplus</groupId>
                 <artifactId>junit-4-13_2.13</artifactId>
-                <version>3.2.5.0</version>
+                <version>3.2.13.0</version>
                 <scope>test</scope>
             </dependency>
 
             <dependency>
                 <groupId>com.typesafe</groupId>
                 <artifactId>config</artifactId>
-                <version>1.4.0</version>
+                <version>1.4.2</version>
             </dependency>
             <dependency>
                 <groupId>com.typesafe</groupId>
                 <artifactId>ssl-config-core_2.13</artifactId>
-                <version>0.4.2</version>
+                <version>0.4.3</version>
             </dependency>
 
             <!-- Akka testkit -->
             <dependency>
                 <groupId>com.typesafe.akka</groupId>
                 <artifactId>akka-testkit_2.13</artifactId>
-                <version>2.6.18</version>
+                <version>2.6.21</version>
                 <scope>test</scope>
                 <exclusions>
                     <exclusion>
@@ -87,7 +87,7 @@
             <dependency>
                 <groupId>com.typesafe.akka</groupId>
                 <artifactId>akka-actor-testkit-typed_2.13</artifactId>
-                <version>2.6.18</version>
+                <version>2.6.21</version>
                 <scope>test</scope>
                 <exclusions>
                     <exclusion>
             <dependency>
                 <groupId>com.typesafe.akka</groupId>
                 <artifactId>akka-persistence-tck_2.13</artifactId>
-                <version>2.6.18</version>
+                <version>2.6.21</version>
                 <scope>test</scope>
                 <exclusions>
                     <exclusion>
             <dependency>
                 <groupId>org.reactivestreams</groupId>
                 <artifactId>reactive-streams</artifactId>
-                <version>1.0.3</version>
+                <version>1.0.4</version>
             </dependency>
 
             <!-- Aeron, required by Akka -->
             <dependency>
                 <groupId>org.agrona</groupId>
                 <artifactId>agrona</artifactId>
-                <version>1.14.0</version>
+                <version>1.15.2</version>
             </dependency>
             <dependency>
                 <groupId>io.aeron</groupId>
                 <artifactId>aeron-client</artifactId>
-                <version>1.37.0</version>
+                <version>1.38.1</version>
             </dependency>
             <dependency>
                 <groupId>io.aeron</groupId>
                 <artifactId>aeron-driver</artifactId>
-                <version>1.37.0</version>
+                <version>1.38.1</version>
             </dependency>
         </dependencies>
     </dependencyManagement>
index 1555fc4e9ccbd6baf1af2f9f6f7106c0c6b5fb45..686cfefcde3dd6bc82db98bfd683e2738ef5ff9e 100644 (file)
@@ -28,11 +28,6 @@ The OpenDaylight Controller relies on the following technologies:
 The OpenDaylight Controller provides following model-driven subsystems
 as a foundation for Java applications:
 
--  :ref:`config_subsystem` - an activation,
-   dependency-injection and configuration framework, which allows
-   two-phase commits of configuration and dependency-injection, and
-   allows for run-time rewiring.
-
 -  :ref:`MD-SAL <mdsal_dev_guide>` - messaging and data storage
    functionality for data, notifications and RPCs modeled by application
    developers. MD-SAL uses YANG as the modeling for both interface and
@@ -885,15 +880,15 @@ RESTCONF operations overview
 RESTCONF supports **OPTIONS**, **GET**, **PUT**, **POST**, and
 **DELETE** operations. Request and response data can either be in the
 XML or JSON format. XML structures according to yang are defined at:
-`XML-YANG <http://tools.ietf.org/html/rfc6020>`__. JSON structures are
+`XML-YANG <https://www.rfc-editor.org/rfc/rfc6020>`__. JSON structures are
 defined at:
-`JSON-YANG <http://tools.ietf.org/html/draft-lhotka-netmod-yang-json-02>`__.
+`JSON-YANG <https://datatracker.ietf.org/doc/html/draft-lhotka-netmod-yang-json-02>`__.
 Data in the request must have a correctly set **Content-Type** field in
 the http header with the allowed value of the media type. The media type
 of the requested data has to be set in the **Accept** field. Get the
 media types for each resource by calling the OPTIONS operation. Most of
 the paths of the pathsRestconf endpoints use `Instance
-Identifier <https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
+Identifier <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
 ``<identifier>`` is used in the explanation of the operations.
 
 | **<identifier>**
@@ -915,7 +910,7 @@ Identifier <https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Co
      be known which node is X (for example: C:X). For more details about
      encoding, see: `RESTCONF 02 - Encoding YANG Instance Identifiers in
      the Request
-     URI. <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02#section-5.3.1>`__
+     URI. <https://datatracker.ietf.org/doc/html/draft-bierman-netconf-restconf-02#section-5.3.1>`__
 
 Mount point
 ~~~~~~~~~~~
@@ -927,7 +922,7 @@ Mount point
   point itself by using <identifier>/**yang-ext:mount**.
 | More information on how to actually use mountpoints is available at:
   `OpenDaylight
-  Controller:Config:Examples:Netconf <https://wiki.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
+  Controller:Config:Examples:Netconf <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
 
 HTTP methods
 ~~~~~~~~~~~~
@@ -1091,7 +1086,7 @@ DELETE /restconf/config/<identifier>
 -  <identifier> points to a data node which must be removed.
 
 More information is available in the `RESTCONF
-RFC <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02>`__.
+RFC <https://datatracker.ietf.org/doc/html/draft-bierman-netconf-restconf-02>`__.
 
 How RESTCONF works
 ~~~~~~~~~~~~~~~~~~
@@ -1370,543 +1365,3 @@ Something practical
 
     Status: 200 OK
 
-Websocket change event notification subscription tutorial
----------------------------------------------------------
-
-Subscribing to data change notifications makes it possible to obtain
-notifications about data manipulation (insert, change, delete) which are
-done on any specified **path** of any specified **datastore** with
-specific **scope**. In the following examples, *{odlAddress}* is the address
-of the server where OpenDaylight is running and *{odlPort}* is the port on
-which OpenDaylight is running.
-
-Websocket notifications subscription process
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this section we will learn what steps need to be taken in order to
-successfully subscribe to data change event notifications.
-
-Create stream
-^^^^^^^^^^^^^
-
-In order to use event notifications, you first need to call the RPC that
-creates a notification stream that you can later listen to. You need to
-provide three parameters to this RPC:
-
--  **path**: the data store path that you plan to listen to. You can
-   register a listener on containers, lists, and leaves.
-
--  **datastore**: data store type. *OPERATIONAL* or *CONFIGURATION*.
-
--  **scope**: Represents scope of data change. Possible options are:
-
-   -  BASE: only changes directly to the data tree node specified in the
-      path will be reported
-
-   -  ONE: changes to the node and to direct child nodes will be
-      reported
-
-   -  SUBTREE: changes anywhere in the subtree starting at the node will
-      be reported
-
-The RPC to create the stream can be invoked via RESTCONF like this:
-
--  URI:
-   http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription
-
--  HEADER: Content-Type=application/json
-
--  OPERATION: POST
-
--  DATA:
-
-   .. code:: json
-
-       {
-           "input": {
-               "path": "/toaster:toaster/toaster:toasterStatus",
-               "sal-remote-augment:datastore": "OPERATIONAL",
-               "sal-remote-augment:scope": "ONE"
-           }
-       }
-
-The response should look something like this:
-
-.. code:: json
-
-    {
-        "output": {
-            "stream-name": "data-change-event-subscription/toaster:toaster/toaster:toasterStatus/datastore=CONFIGURATION/scope=SUBTREE"
-        }
-    }
-
-**stream-name** is important because you will need to use it when you
-subscribe to the stream in the next step.
-
-.. note::
-
-    Internally, this will create a new listener for *stream-name* if it
-    did not already exist.
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-In order to subscribe to the stream and obtain the WebSocket location, you
-need to call *GET* on your stream path. The URI should generally be
-http://{odlAddress}:{odlPort}/restconf/streams/stream/{streamName},
-where *{streamName}* is the *stream-name* parameter contained in the
-response from the *create-data-change-event-subscription* RPC in the
-previous step.
-
--  URI:
-   http://{odlAddress}:{odlPort}/restconf/streams/stream/data-change-event-subscription/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE
-
--  OPERATION: GET
-
-The subscription call may be modified with the following query parameters defined in the RESTCONF RFC:
-
--  `filter <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.6>`__
-
--  `start-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.7>`__
-
--  `end-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.8>`__
-
-In addition, the following ODL extension query parameters are supported
-(see the sketch after this list):
-
-:odl-leaf-nodes-only:
-  If this parameter is set to "true", create and update notifications will only
-  contain the leaf nodes modified instead of the entire subscription subtree.
-  This can help in reducing the size of the notifications.
-
-:odl-skip-notification-data:
-  If this parameter is set to "true", create and update notifications will only
-  contain modified leaf nodes without data.
-  This can help in reducing the size of the notifications.
-
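-As a minimal sketch (reusing the jQuery-based subscription call shown later
-in this tutorial; the host, port and parameter value below are illustrative,
-not taken from this change), such a parameter is simply appended to the
-subscription URI as a query string:
-
-.. code:: javascript
-
-    // hypothetical example: ask for only the modified leaf nodes in notifications
-    var subscribeUri = 'http://{odlAddress}:{odlPort}/restconf/streams/stream/'
-            + streamName + '?odl-leaf-nodes-only=true';
-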
-The expected response status is 200 OK and the response body should be
-empty. You will get your WebSocket location from the **Location** header of
-the response. For example, in our toaster example the **Location** header
-would have this value:
-*ws://{odlAddress}:8185/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE*
-
-.. note::
-
-    During this phase there is an internal check to see if a listener for
-    the *stream-name* from the URI exists. If not, a new listener is
-    registered with the DOM data broker.
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-You should now have a data change notification stream created and have the
-location of a WebSocket. You can use this WebSocket to listen to data
-change notifications. To listen to notifications you can use a JavaScript
-client or, if you are using the Chrome browser, you can use the
-`Simple WebSocket
-Client <https://chrome.google.com/webstore/detail/simple-websocket-client/pfdhoblngboilpfeibdedpjgfnlcodoo>`__.
-
-Also, for testing purposes, there is a simple Java application named
-WebSocketClient. The application is placed in the
-*-sal-rest-connector-classes.class* project. It accepts a WebSocket URI
-as an input parameter. After starting the utility (run the WebSocketClient
-class directly in Eclipse/IntelliJ IDEA), received notifications should be
-displayed in the console.
-
-Notifications are always in XML format and look like this:
-
-.. code:: xml
-
-    <notification xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">
-        <eventTime>2014-09-11T09:58:23+02:00</eventTime>
-        <data-changed-notification xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote">
-            <data-change-event>
-                <path xmlns:meae="http://netconfcentral.org/ns/toaster">/meae:toaster</path>
-                <operation>updated</operation>
-                <data>
-                   <!-- updated data -->
-                </data>
-            </data-change-event>
-        </data-changed-notification>
-    </notification>
-
-Example use case
-~~~~~~~~~~~~~~~~
-
-The typical use case is listening to data change events to update web
-page data in real-time. In this tutorial we will be using toaster as the
-base.
-
-When you call the *make-toast* RPC, it sets *toasterStatus* to "down" to
-reflect that the toaster is busy making toast. When it finishes,
-*toasterStatus* is set to "up" again. We will listen to these toaster
-status changes in the data store and reflect them on our web page in
-real-time thanks to WebSocket data change notifications.
-
-Simple javascript client implementation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We will create a simple JavaScript web application that listens for
-updates on the *toasterStatus* leaf and updates an element of our web page
-according to the new toaster status.
-
-Create stream
-^^^^^^^^^^^^^
-
-First you need to create the stream that you are planning to subscribe to.
-This can be achieved by invoking the "create-data-change-event-subscription"
-RPC on RESTCONF via an AJAX request. You need to provide the data store
-**path** that you plan to listen on, the **data store type** and the **scope**.
-If the request is successful, you can extract the **stream-name** from
-the response and use it to subscribe to the newly created stream. The
-*{username}* and *{password}* fields represent the credentials that you
-use to connect to OpenDaylight via RESTCONF:
-
-.. note::
-
-    The default user name and password are "admin".
-
-.. code:: javascript
-
-    function createStream() {
-        $.ajax(
-            {
-                url: 'http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription',
-                type: 'POST',
-                headers: {
-                  'Authorization': 'Basic ' + btoa('{username}:{password}'),
-                  'Content-Type': 'application/json'
-                },
-                data: JSON.stringify(
-                    {
-                        'input': {
-                            'path': '/toaster:toaster/toaster:toasterStatus',
-                            'sal-remote-augment:datastore': 'OPERATIONAL',
-                            'sal-remote-augment:scope': 'ONE'
-                        }
-                    }
-                )
-            }).done(function (data) {
-                // this function will be called when ajax call is executed successfully
-                subscribeToStream(data.output['stream-name']);
-            }).fail(function (data) {
-                // this function will be called when ajax call fails
-                console.log("Create stream call unsuccessful");
-            })
-    }
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-The next step is to subscribe to the stream. To subscribe to the stream,
-you need to call *GET* on
-*http://{odlAddress}:{odlPort}/restconf/streams/stream/{stream-name}*.
-If the call is successful, you get the WebSocket address for this stream in
-the **Location** header of the response. You can get the response
-header by calling *getResponseHeader('Location')* on the HttpRequest
-object inside the *done()* function call:
-
-.. code:: javascript
-
-    function subscribeToStream(streamName) {
-        $.ajax(
-            {
-                url: 'http://{odlAddress}:{odlPort}/restconf/streams/stream/' + streamName,
-                type: 'GET',
-                headers: {
-                  'Authorization': 'Basic ' + btoa('{username}:{password}'),
-                }
-            }
-        ).done(function (data, textStatus, httpReq) {
-            // we need function that has http request object parameter in order to access response headers.
-            listenToNotifications(httpReq.getResponseHeader('Location'));
-        }).fail(function (data) {
-            console.log("Subscribe to stream call unsuccessful");
-        });
-    }
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-Once you have the WebSocket server location, you can connect to it and
-start receiving data change events. You need to define functions that
-will handle events on WebSocket. In order to process incoming events
-from OpenDaylight you need to provide a function that will handle
-*onmessage* events. The function must have one parameter that represents
-the received event object. The event data will be stored in
-*event.data*. The data will be in an XML format that you can then easily
-parse using jQuery.
-
-.. code:: javascript
-
-    function listenToNotifications(socketLocation) {
-        try {
-            var notificationSocket = new WebSocket(socketLocation);
-
-            notificationSocket.onmessage = function (event) {
-                // we process our received event here
-                console.log('Received toaster data change event.');
-                $($.parseXML(event.data)).find('data-change-event').each(
-                    function (index) {
-                        var operation = $(this).find('operation').text();
-                        if (operation == 'updated') {
-                            // toaster status was updated so we call function that gets the value of toasterStatus leaf
-                            updateToasterStatus();
-                            return false;
-                        }
-                    }
-                );
-            }
-            notificationSocket.onerror = function (error) {
-                console.log("Socket error: " + error);
-            }
-            notificationSocket.onopen = function (event) {
-                console.log("Socket connection opened.");
-            }
-            notificationSocket.onclose = function (event) {
-                console.log("Socket connection closed.");
-            }
-            // if there is a problem on socket creation we get an exception (e.g. when the socket address is incorrect)
-        } catch(e) {
-            alert("Error when creating WebSocket: " + e);
-        }
-    }
-
-The *updateToasterStatus()* function calls *GET* on the path that was
-modified and sets the toaster status in some web page element according to
-the received data; a minimal sketch of such a function is shown below.
-After the WebSocket connection has been established, you can test events by
-calling the make-toast RPC via RESTCONF.
-
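-A minimal sketch of such a function, assuming jQuery is available and that
-the *toasterStatus* leaf is read from the operational datastore (the URL
-layout, the JSON key and the *#toasterStatus* page element are illustrative
-assumptions, not taken from this change), could look like this:
-
-.. code:: javascript
-
-    function updateToasterStatus() {
-        $.ajax(
-            {
-                // hypothetical URL: GET the current toasterStatus leaf value
-                url: 'http://{odlAddress}:{odlPort}/restconf/operational/toaster:toaster/toaster:toasterStatus',
-                type: 'GET',
-                headers: {
-                  'Authorization': 'Basic ' + btoa('{username}:{password}'),
-                  'Accept': 'application/json'
-                }
-            }).done(function (data) {
-                // the exact JSON key depends on the RESTCONF version in use; adjust as needed
-                var status = data['toaster:toasterStatus'] || data['toasterStatus'];
-                // reflect the new status in an (assumed) page element with id "toasterStatus"
-                $('#toasterStatus').text(status);
-            }).fail(function (data) {
-                console.log("Get toasterStatus call unsuccessful");
-            });
-    }
-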
-.. note::
-
-    For more information about WebSockets in JavaScript, visit `Writing
-    WebSocket client
-    applications <https://developer.mozilla.org/en-US/docs/WebSockets/Writing_WebSocket_client_applications>`__
-
-.. _config_subsystem:
-
-Config Subsystem
-----------------
-
-Overview
-~~~~~~~~
-
-The Controller configuration operation has three stages:
-
--  First, a Proposed configuration is created. Its target is to replace
-   the old configuration.
-
--  Second, the Proposed configuration is validated, and then committed.
-   If it passes validation successfully, the Proposed configuration
-   state will be changed to Validated.
-
--  Finally, a Validated configuration can be Committed, and the affected
-   modules can be reconfigured.
-
-In fact, each configuration operation is wrapped in a transaction. Once
-a transaction is created, it can be configured, that is to say, a user
-can abort the transaction during this stage. After the transaction
-configuration is done, it is committed to the validation stage. In this
-stage, the validation procedures are invoked. If one or more validations
-fail, the transaction can be reconfigured. Upon success, the second
-phase commit is invoked. If this commit is successful, the transaction
-enters the last stage, committed. After that, the desired modules are
-reconfigured. If the second phase commit fails, it means that the
-transaction is unhealthy - basically, a new configuration instance
-creation failed, and the application can be in an inconsistent state.
-
-.. figure:: ./images/configuration.jpg
-   :alt: Configuration states
-
-   Configuration states
-
-.. figure:: ./images/Transaction.jpg
-   :alt: Transaction states
-
-   Transaction states
-
-Validation
-~~~~~~~~~~
-
-To secure the consistency and safety of the new configuration and to
-avoid conflicts, the configuration validation process is necessary.
-Usually, validation checks the input parameters of a new configuration,
-and mostly verifies module-specific relationships. The validation
-procedure results in a decision on whether the proposed configuration is
-healthy.
-
-Dependency resolver
-~~~~~~~~~~~~~~~~~~~
-
-Since there can be dependencies between modules, a change in a module
-configuration can affect the state of other modules. Therefore, we need
-to verify whether dependencies on other modules can be resolved. The
-Dependency Resolver acts in a manner similar to dependency injectors.
-Basically, a dependency tree is built.
-
-APIs and SPIs
-~~~~~~~~~~~~~
-
-This section describes configuration system APIs and SPIs.
-
-SPIs
-^^^^
-
-**Module** org.opendaylight.controller.config.spi. Module is the common
-interface for all modules: every module must implement it. The module is
-designated to hold configuration attributes, validate them, and create
-service instances based on the attributes. This instance must
-implement the AutoCloseable interface to allow for resource clean-up. If
-the module was created from an already running instance, it contains an
-old instance of the module. A module can implement multiple services. If
-the module depends on other modules, setters need to be annotated with
-@RequireInterface.
-
-**Module creation**
-
-1. The module needs to be configured, set with all required attributes.
-
-2. The module is then moved to the commit stage for validation. If the
-   validation fails, the module attributes can be reconfigured.
-   Otherwise, a new instance is either created, or an old instance is
-   reconfigured. A module instance is identified by ModuleIdentifier,
-   consisting of the factory name and instance name.
-
-| **ModuleFactory** org.opendaylight.controller.config.spi. The
-  ModuleFactory interface must be implemented by each module factory.
-| A module factory can create a new module instance in two ways:
-
--  From an existing module instance
-
--  | An entirely new instance
-   | ModuleFactory can also return default modules, useful for
-     populating the registry with already existing configurations. A module
-     factory implementation must have a globally unique name.
-
-APIs
-^^^^
-
-+--------------------------------------+--------------------------------------+
-| ConfigRegistry                       | Represents functionality provided by |
-|                                      | a configuration transaction (create, |
-|                                      | destroy module, validate, or abort   |
-|                                      | transaction).                        |
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionController          | Represents functionality for         |
-|                                      | manipulating with configuration      |
-|                                      | transactions (begin, commit config). |
-+--------------------------------------+--------------------------------------+
-| RuntimeBeanRegistratorAwareConfiBean | The module implementing this         |
-|                                      | interface will receive               |
-|                                      | RuntimeBeanRegistrator before        |
-|                                      | getInstance is invoked.              |
-+--------------------------------------+--------------------------------------+
-
-Runtime APIs
-^^^^^^^^^^^^
-
-+--------------------------------------+--------------------------------------+
-| RuntimeBean                          | Common interface for all runtime     |
-|                                      | beans                                |
-+--------------------------------------+--------------------------------------+
-| RootRuntimeBeanRegistrator           | Represents functionality for root    |
-|                                      | runtime bean registration, which     |
-|                                      | subsequently allows hierarchical     |
-|                                      | registrations                        |
-+--------------------------------------+--------------------------------------+
-| HierarchicalRuntimeBeanRegistration  | Represents functionality for runtime |
-|                                      | bean registration and                |
-|                                      | unregistration from hierarchy        |
-+--------------------------------------+--------------------------------------+
-
-JMX APIs
-^^^^^^^^
-
-| The JMX API is intended as a transition layer between the Client API and
-  the JMX platform.
-
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionControllerMXBean    | Extends ConfigTransactionController, |
-|                                      | executed by Jolokia clients on       |
-|                                      | configuration transaction.           |
-+--------------------------------------+--------------------------------------+
-| ConfigRegistryMXBean                 | Represents entry point of            |
-|                                      | configuration management for         |
-|                                      | MXBeans.                             |
-+--------------------------------------+--------------------------------------+
-| Object names                         | Object Name is the pattern used in   |
-|                                      | JMX to locate JMX beans. It consists |
-|                                      | of domain and key properties (at     |
-|                                      | least one key-value pair). Domain is |
-|                                      | defined as                           |
-|                                      | "org.opendaylight.controller". The   |
-|                                      | only mandatory property is "type".   |
-+--------------------------------------+--------------------------------------+
-
-Use case scenarios
-^^^^^^^^^^^^^^^^^^
-
-| A few samples of successful and unsuccessful transaction scenarios
-  follow:
-
-**Successful commit scenario**
-
-1.  The user creates a transaction by calling the createTransaction() method
-    on ConfigRegistry.
-
-2.  ConfigRegistry creates a transaction controller, and registers the
-    transaction as a new bean.
-
-3.  Runtime configurations are copied to the transaction. The user can
-    create modules and set their attributes.
-
-4.  The configuration transaction is to be committed.
-
-5.  The validation process is performed.
-
-6.  After successful validation, the second phase commit begins.
-
-7.  Modules proposed to be destroyed are destroyed, and their service
-    instances are closed.
-
-8.  Runtime beans are set to registrator.
-
-9.  The transaction controller invokes the method getInstance on each
-    module.
-
-10. The transaction is committed, and resources are either closed or
-    released.
-
-| **Validation failure scenario**
-| The transaction is the same as the previous case until the validation
-  process.
-
-1. If validation fails (that is to say, illegal input attribute values
-   or a dependency resolver failure), a ValidationException is thrown
-   and exposed to the user.
-
-2. The user can decide to reconfigure the transaction and commit again,
-   or abort the current transaction.
-
-3. On aborted transactions, TransactionController and JMXRegistrator are
-   properly closed.
-
-4. Unregistration event is sent to ConfigRegistry.
-
-Default module instances
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The configuration subsystem provides a way for modules to create default
-instances. A default instance is an instance of a module that is created
-at module bundle start-up (when the module becomes visible to the
-configuration subsystem, for example, when its bundle is activated in the
-OSGi environment). By default, no default instances are produced.
-
-The default instance does not differ from instances created later in the
-module life-cycle. The only difference is that the configuration for the
-default instance cannot be provided by the configuration subsystem. The
-module has to acquire the configuration for these instances on its own.
-It can be acquired from, for example, environment variables. After the
-creation of a default instance, it acts as a regular instance and fully
-participates in the configuration subsystem (it can be reconfigured or
-deleted in subsequent transactions).
diff --git a/docs/images/Transaction.jpg b/docs/images/Transaction.jpg
deleted file mode 100644 (file)
index 258710a..0000000
Binary files a/docs/images/Transaction.jpg and /dev/null differ
diff --git a/docs/images/configuration.jpg b/docs/images/configuration.jpg
deleted file mode 100644 (file)
index 3b07a2b..0000000
Binary files a/docs/images/configuration.jpg and /dev/null differ
index 1aa368380cf7f92010c534a0ad7b1e4885f2b9ea..22c11bcb3cb9aaf90e66592d343e3f425deaccf9 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>controller-docs</artifactId>
     <packaging>jar</packaging>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <name>${project.artifactId}</name>
     <description>Controller documentation</description>
 
     </dependencyManagement>
 
     <dependencies>
-        <!-- Config Subsystem remnants -->
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-event-executor-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-threadgroup-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-timer-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-impl</artifactId>
-        </dependency>
-
         <!-- Clustered implementation -->
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
         </dependency>
 
         <!-- Third-party dependencies -->
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+        </dependency>
         <dependency>
             <groupId>com.guicedee.services</groupId>
             <artifactId>javax.inject</artifactId>
         </dependency>
         <dependency>
-            <groupId>javax.annotation</groupId>
-            <artifactId>javax.annotation-api</artifactId>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.kohsuke.metainf-services</groupId>
         </dependency>
         <dependency>
             <groupId>org.osgi</groupId>
-            <artifactId>osgi.core</artifactId>
+            <artifactId>org.osgi.framework</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
         </dependency>
         <dependency>
             <groupId>org.osgi</groupId>
-            <artifactId>osgi.cmpn</artifactId>
+            <artifactId>org.osgi.service.metatype.annotations</artifactId>
         </dependency>
     </dependencies>
 
                 <configuration combine.children="append">
                     <links>
                         <link>https://junit.org/junit4/javadoc/4.13/</link>
-                        <link>http://hamcrest.org/JavaHamcrest/javadoc/2.2/</link>
-                        <link>http://google.github.io/truth/api/1.0.1/</link>
-                        <link>http://www.slf4j.org/apidocs/</link>
-                        <link>https://google.github.io/guava/releases/29.0-jre/api/docs/</link>
-                        <link>http://doc.akka.io/japi/akka/2.6.18/</link>
-                        <link>http://netty.io/4.1/api/</link>
-                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/</link>
-                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-3.9/</link>
-                        <link>https://commons.apache.org/proper/commons-codec/apidocs/</link>
+                        <link>https://hamcrest.org/JavaHamcrest/javadoc/2.2/</link>
+                        <link>https://www.slf4j.org/apidocs/</link>
+                        <link>https://guava.dev/releases/32.0.1-jre/api/docs/</link>
+                        <link>https://doc.akka.io/japi/akka/2.6/</link>
+                        <link>https://netty.io/4.1/api/</link>
+                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-release/</link>
 
-                        <link>https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/9.0.12/</link>
-                        <link>https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/7.0.12/</link>
-                        <link>https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/8.0.10/</link>
+                        <link>https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/13.0.11/</link>
+                        <link>https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/13.0.2/</link>
+                        <link>https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/13.0.1/</link>
                     </links>
                     <groups>
                         <group>
index f864c9db94cdf1659e1d7bbf9a66f5bc530659d9..6401adb5fe235bc7a5e3d2fd0b95a59e4c3665f5 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>feature-repo-parent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>features-controller-experimental</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
     <name>OpenDaylight :: Controller :: Experimental Features</name>
     <description>Controller Experimental Features</description>
     </dependencyManagement>
 
     <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-controller-exp-netty-config</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-toaster</artifactId>
index d562accfc90bd71d2a9d171bd68e378008ba80d6..fb095fe8a15a78ea5c7dd237bde0d96266b68f87 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>feature-repo-parent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>features-controller-testing</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
     <name>OpenDaylight :: Controller :: Features to support CSIT testing</name>
     <description>Controller CSIT Features</description>
index c652fb26b15f73d38fb48d4d10adb2f3755b2304..64d72cf7c7ac45ab52ee7d683551303c01584440 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>feature-repo-parent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>features-controller</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
     <name>OpenDaylight :: Controller :: Features</name>
     <description>Controller Production Features</description>
index e1f76bf47223c6a135395d7a593b23952bc46e04..672ac82c36119d5c792c81dd7233d2a7bc20cf0b 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 7829413d47569b18588c819daa202fddba47343d..69d3b61ee2fe5c257ed76899660d47f2da2be1bd 100644 (file)
@@ -8,6 +8,6 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-clustering-test-app-${project.version}">
     <feature name="odl-clustering-test-app" version="${project.version}">
-        <feature version="[8,9)">odl-mdsal-model-rfc6991</feature>
+        <feature version="[13,14)">odl-mdsal-model-rfc6991</feature>
     </feature>
 </features>
index 458ed4446bdfb6109cb6f616543207ba3de8d27e..b1f43ef15b37042a937b070d1896c45cd331e86e 100644 (file)
@@ -14,7 +14,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index e0f6329f4d0e5f17067f01a64532f3f55117fcbb..4e7493fb0bb0dda0bde9b8d180a3f709c78d5bf8 100644 (file)
@@ -2,14 +2,14 @@
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-akka">
     <feature version="0.0.0">
         <feature>odl-controller-scala</feature>
-        <bundle>mvn:com.typesafe/config/1.4.0</bundle>
-        <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.4.2</bundle>
-        <bundle>mvn:io.aeron/aeron-client/1.37.0</bundle>
-        <bundle>mvn:io.aeron/aeron-driver/1.37.0</bundle>
+        <bundle>mvn:com.typesafe/config/1.4.2</bundle>
+        <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.4.3</bundle>
+        <bundle>mvn:io.aeron/aeron-client/1.38.1</bundle>
+        <bundle>mvn:io.aeron/aeron-driver/1.38.1</bundle>
         <bundle>mvn:io.netty/netty/3.10.6.Final</bundle>
-        <bundle>mvn:org.agrona/agrona/1.14.0</bundle>
+        <bundle>mvn:org.agrona/agrona/1.15.2</bundle>
         <bundle>mvn:org.opendaylight.controller/repackaged-akka/${project.version}</bundle>
-        <bundle>mvn:org.reactivestreams/reactive-streams/1.0.3</bundle>
+        <bundle>mvn:org.reactivestreams/reactive-streams/1.0.4</bundle>
         <feature>wrap</feature>
         <bundle>wrap:mvn:org.lmdbjava/lmdbjava/0.7.0</bundle>
     </feature>
index 3c1ae262a9f4a200a6638f68b56063514e66a3fe..b6bba0b97c7e83fc2be0173e1c51316587ebb610 100644 (file)
@@ -12,7 +12,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index c18573aa3a5958e25ae0b647107943f97aca6118..82dfaca522424d62d8490419c7163337ff4e0b40 100644 (file)
@@ -8,10 +8,10 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-blueprint-${project.version}">
     <feature name="odl-controller-blueprint" version="${project.version}">
-        <feature version="[7,8)">odl-yangtools-codec</feature>
-        <feature version="[8,9)">odl-mdsal-binding-api</feature>
-        <feature version="[8,9)">odl-mdsal-binding-runtime</feature>
-        <feature version="[8,9)">odl-mdsal-dom-api</feature>
+        <feature version="[13,14)">odl-yangtools-codec</feature>
+        <feature version="[13,14)">odl-mdsal-binding-api</feature>
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
+        <feature version="[13,14)">odl-mdsal-dom-api</feature>
         <bundle start-level="40">mvn:org.opendaylight.controller/blueprint/${project.version}</bundle>
     </feature>
 </features>
index 7ed88b092cee70004560d4131b400a81c52aff24..9bb7bc3a649e4d7eafe868d0263138a0b3671221 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 9a765beb481c7ae395c9328b63fb960db75f829e..2068ab79f94f735afd99092299f07d2e034639d6 100644 (file)
@@ -8,9 +8,9 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
     <feature name="odl-controller-broker-local" version="${project.version}">
-        <feature version="[8,9)">odl-mdsal-dom</feature>
-        <feature version="[8,9)">odl-mdsal-eos-binding</feature>
-        <feature version="[8,9)">odl-mdsal-eos-dom</feature>
-        <feature version="[8,9)">odl-mdsal-singleton-dom</feature>
+        <feature version="[13,14)">odl-mdsal-dom</feature>
+        <feature version="[13,14)">odl-mdsal-eos-binding</feature>
+        <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+        <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
     </feature>
 </features>
diff --git a/features/odl-controller-exp-netty-config/pom.xml b/features/odl-controller-exp-netty-config/pom.xml
deleted file mode 100644 (file)
index 4a29734..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
-        <relativePath>../single-feature-parent</relativePath>
-    </parent>
-
-    <artifactId>odl-controller-exp-netty-config</artifactId>
-    <packaging>feature</packaging>
-    <name>OpenDaylight :: Controller :: Experimental :: Netty Configuration</name>
-    <description>Common configuration for Netty resources</description>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-netty-4</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-event-executor-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-threadgroup-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-timer-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-impl</artifactId>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/features/odl-controller-exp-netty-config/src/main/feature/feature.xml b/features/odl-controller-exp-netty-config/src/main/feature/feature.xml
deleted file mode 100644 (file)
index 2eac124..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!--
- Copyright © 2018 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-exp-netty-config">
-    <feature name="odl-controller-exp-netty-config">
-        <feature version="[9,10)">odl-netty-4</feature>
-    </feature>
-</features>
index 06307baf38f0c9335b4258ff5138d7101172fa9d..be8cc89a13637664a0b9eca7b453169d3f426889 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index ffc34187e5031954f1db320a961ec25e01f90102..7ae191c7bed7e82e75de2eb459337f6628b2e3a4 100644 (file)
@@ -8,7 +8,7 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-mdsal-common-${project.version}">
     <feature name="odl-controller-mdsal-common" version="${project.version}">
-        <feature version="[8,9)">odl-mdsal-common</feature>
-        <feature version="[8,9)">odl-mdsal-binding-runtime</feature>
+        <feature version="[13,14)">odl-mdsal-common</feature>
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
     </feature>
 </features>
index 7bc24ca458b176a84679032ff9353e86de00e192..565397956434d9f039fcdb7be76e90140788e21f 100644 (file)
@@ -14,7 +14,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index b4469d23d0ab3f48886ea7c18ccdd7fe913dad76..73764f6fec799e4218cd01cea03f18079aa42516 100644 (file)
@@ -3,7 +3,7 @@
     <feature version="0.0.0">
         <bundle>mvn:org.scala-lang.modules/scala-java8-compat_2.13/1.0.2</bundle>
         <bundle>mvn:org.scala-lang.modules/scala-parser-combinators_2.13/1.1.2</bundle>
-        <bundle>mvn:org.scala-lang/scala-library/2.13.8</bundle>
-        <bundle>mvn:org.scala-lang/scala-reflect/2.13.8</bundle>
+        <bundle>mvn:org.scala-lang/scala-library/2.13.13</bundle>
+        <bundle>mvn:org.scala-lang/scala-reflect/2.13.13</bundle>
     </feature>
 </features>
index 48dedfda35eb5b27b43a101bbacfee0c99db6ce6..9ea689a4a034e6e2e943ee67cefc0afb9312ca28 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 5945a4fb93a262432f633ad10e285f782fd0eb71..42b2d9e4014654852e72ea7df243a17951149bd9 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 95740769acd5db8c0258f67dcbd123a37b34edc9..686971b49f27b6ff823add1920828a31e4113279 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-controller-blueprint</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
     </dependencies>
 </project>
index 11af9450e10a7287b99f8351b3984d04ee5ec036..ffa724ca99c9d55b485b062e0439861984f4b5a7 100644 (file)
@@ -8,7 +8,7 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
     <feature name="odl-mdsal-broker" version="${project.version}">
-        <feature version="[8,9)">odl-mdsal-singleton-dom</feature>
-        <feature version="[8,9)">odl-mdsal-eos-binding</feature>
+        <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
+        <feature version="[13,14)">odl-mdsal-eos-binding</feature>
     </feature>
 </features>
index a9c1831daab8fc77e7d6fedec19df7988a5b3ac7..5d1bbbeb51ebc545edb2c2892b70d621bd16603e 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 9a048e910a0a6ffbba7fe7df3fab32d1d972efd6..f4034164f185861c2858c6ab5db297bc8a5a2331 100644 (file)
@@ -8,10 +8,10 @@
   -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
     <feature name="odl-mdsal-clustering-commons" version="${project.version}">
-        <feature version="[9,10)">odl-apache-commons-lang3</feature>
-        <feature version="[9,10)">odl-dropwizard-metrics</feature>
-        <feature version="[9,10)">odl-servlet-api</feature>
-        <feature version="[7,8)">odl-yangtools-data</feature>
-        <feature version="[7,8)">odl-yangtools-codec</feature>
+        <feature version="[13,14)">odl-apache-commons-lang3</feature>
+        <feature version="[13,14)">odl-dropwizard-metrics</feature>
+        <feature version="[13,14)">odl-servlet-api</feature>
+        <feature version="[13,14)">odl-yangtools-data</feature>
+        <feature version="[13,14)">odl-yangtools-codec</feature>
     </feature>
 </features>
index 62659c8464b9b96fd46c36e6e99bb7765898a392..35b1d52094eb8c3c0676eb47d1dc4924d109c85c 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 756bd7cf5fd84f5315b03f8c7b28b31f838e1738..bf7451b93d4e7f421775592685d269ef309cf8a7 100644 (file)
@@ -8,11 +8,11 @@
   -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
     <feature name="odl-mdsal-distributed-datastore" version="${project.version}">
-        <feature version="[9,10)">odl-apache-commons-text</feature>
-        <feature version="[7,8)">odl-yangtools-codec</feature>
-        <feature version="[8,9)">odl-mdsal-eos-dom</feature>
-        <feature version="[8,9)">odl-mdsal-dom-broker</feature>
-        <feature version="[8,9)">odl-mdsal-binding-dom-adapter</feature>
+        <feature version="[13,14)">odl-apache-commons-text</feature>
+        <feature version="[13,14)">odl-yangtools-codec</feature>
+        <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+        <feature version="[13,14)">odl-mdsal-dom-broker</feature>
+        <feature version="[13,14)">odl-mdsal-binding-dom-adapter</feature>
         <configfile finalname="configuration/initial/akka.conf">
             mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf
         </configfile>
index a3cb6f00fec93da5bd27a7fd0e40907acb02df9a..68033905be04c53fbc578292873149454ce68af5 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 2fd4ef5f16195fdcd9140fad766202d9afcccb50..01bfd40440011097c518f5857f82b73c9d9c2871 100644 (file)
@@ -11,7 +11,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../single-feature-parent</relativePath>
     </parent>
 
index 07800a651bb96d90ee4109da30ee44a6d81c8328..9a6b3e79d167994c6c0165da52cc9380ccc38be7 100644 (file)
@@ -8,6 +8,6 @@
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-toaster-${project.version}">
     <feature name="odl-toaster" version="${project.version}">
-        <feature version="[8,9)">odl-mdsal-binding-runtime</feature>
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
     </feature>
 </features>
index bc571e0eb080228442a3e666bdfaa97ac98aaee7..97ade422e27ceb93f12e2209f598f19fff641e95 100644 (file)
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>features-aggregator</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
@@ -34,7 +34,7 @@
 
     <!-- Experimental features -->
     <module>features-controller-experimental</module>
-    <module>odl-controller-exp-netty-config</module>
+    <module>odl-toaster</module>
 
     <!-- CSIT features -->
     <module>features-controller-testing</module>
 
     <!-- Single features, to be cleaned up -->
     <module>odl-controller-blueprint</module>
+    <module>odl-controller-broker-local</module>
     <module>odl-controller-mdsal-common</module>
     <module>odl-jolokia</module>
-    <module>odl-controller-broker-local</module>
     <module>odl-mdsal-broker</module>
     <module>odl-mdsal-clustering-commons</module>
     <module>odl-mdsal-distributed-datastore</module>
     <module>odl-mdsal-remoterpc-connector</module>
-    <module>odl-toaster</module>
   </modules>
 </project>
index 820bd9497dc860730b192befa4c35aefd3d48b93..556d3acd7632b2313b4d00dcf771314c51fc3df0 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>single-feature-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <dependencyManagement>
@@ -25,7 +25,7 @@
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>bundle-parent</artifactId>
-                <version>5.0.0-SNAPSHOT</version>
+                <version>9.0.3-SNAPSHOT</version>
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
index 630133388619e1d04c48a74767a4043d52601d1b..1d98663b1f762f3e9c255ab26d9ee0182be1ebe9 100644 (file)
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>odl-jolokia-osgi</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <build>
index 0476bdf509234e4423d7da43edd0db5f3dfc809d..56869cc048d81cf36757c86e5c7dd09b046d994f 100644 (file)
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>karaf4-parent</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>controller-test-karaf</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <dependencyManagement>
index ae6f8f7cb2d5ee62fcd022e47c3f500f22b1dfec..2b7bfeb106fd238f80a03c86e354f6ebcd426ee0 100644 (file)
@@ -12,7 +12,7 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <artifactId>blueprint</artifactId>
   <packaging>bundle</packaging>
   <name>${project.artifactId}</name>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
 
   <dependencyManagement>
     <dependencies>
       <dependency>
         <groupId>org.opendaylight.yangtools</groupId>
         <artifactId>yangtools-artifacts</artifactId>
-        <version>7.0.12</version>
+        <version>13.0.2</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.mdsal</groupId>
         <artifactId>mdsal-artifacts</artifactId>
-        <version>8.0.10</version>
+        <version>13.0.1</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.apache.aries.blueprint</groupId>
       <artifactId>org.apache.aries.blueprint.core</artifactId>
       <groupId>org.apache.aries</groupId>
       <artifactId>org.apache.aries.util</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-xml</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-common-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-dom-spi</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-codec</artifactId>
+      <artifactId>mdsal-binding-dom-codec-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-codec-xml</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-spec-util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.core</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
+      <artifactId>org.osgi.framework</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.cm</artifactId>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
       <artifactId>org.osgi.service.event</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>com.google.truth</groupId>
-      <artifactId>truth</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.util.tracker</artifactId>
     </dependency>
+
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-test-model</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-dom-adapter</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-dom-adapter</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-test-utils</artifactId>
index 7ad8ddb4e3a5e4b9fd83096d99344cff4a257a06..55994ca1f093ddf4b86a02ccc8847166d679dc8d 100644 (file)
@@ -7,28 +7,26 @@
  */
 package org.opendaylight.controller.blueprint;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Dictionary;
 import java.util.Enumeration;
 import java.util.HashSet;
-import java.util.Hashtable;
 import java.util.List;
+import java.util.Map;
 import org.apache.aries.blueprint.NamespaceHandler;
 import org.apache.aries.blueprint.services.BlueprintExtenderService;
 import org.apache.aries.quiesce.participant.QuiesceParticipant;
 import org.apache.aries.util.AriesFrameworkUtil;
 import org.eclipse.jdt.annotation.Nullable;
-import org.gaul.modernizer_maven_annotations.SuppressModernizer;
 import org.opendaylight.controller.blueprint.ext.OpendaylightNamespaceHandler;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.osgi.framework.Bundle;
 import org.osgi.framework.BundleActivator;
 import org.osgi.framework.BundleContext;
 import org.osgi.framework.BundleEvent;
+import org.osgi.framework.FrameworkUtil;
 import org.osgi.framework.ServiceReference;
 import org.osgi.framework.ServiceRegistration;
 import org.osgi.framework.SynchronousBundleListener;
@@ -131,8 +129,6 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         quiesceParticipantTracker.open();
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private QuiesceParticipant onQuiesceParticipantAdded(final ServiceReference<QuiesceParticipant> reference) {
         quiesceParticipant = reference.getBundle().getBundleContext().getService(reference);
 
@@ -143,8 +139,6 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         return quiesceParticipant;
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private BlueprintExtenderService onBlueprintExtenderServiceAdded(
             final ServiceReference<BlueprintExtenderService> reference) {
         blueprintExtenderService = reference.getBundle().getBundleContext().getService(reference);
@@ -163,20 +157,15 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
     }
 
     private void registerNamespaceHandler(final BundleContext context) {
-        Dictionary<String, Object> props = emptyDict();
-        props.put("osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0);
-        namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(), props);
+        namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(),
+            FrameworkUtil.asDictionary(Map.of(
+                "osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0)));
     }
 
     private void registerBlueprintEventHandler(final BundleContext context) {
         eventHandlerReg = context.registerService(BlueprintListener.class, this, null);
     }
 
-    @SuppressModernizer
-    private static Dictionary<String, Object> emptyDict() {
-        return new Hashtable<>();
-    }
-
     /**
      * Implemented from BundleActivator.
      */
@@ -274,13 +263,12 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         return !paths.isEmpty() ? paths : findBlueprintPaths(bundle, ODL_CUSTOM_BLUEPRINT_FILE_PATH);
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     private static List<Object> findBlueprintPaths(final Bundle bundle, final String path) {
         Enumeration<?> rntries = bundle.findEntries(path, BLUEPRINT_FLE_PATTERN, false);
         if (rntries == null) {
-            return Collections.emptyList();
+            return List.of();
         } else {
-            return Collections.list((Enumeration)rntries);
+            return List.copyOf(Collections.list(rntries));
         }
     }
 
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java
deleted file mode 100644 (file)
index 35f59f8..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import java.util.function.Predicate;
-import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-
-abstract class AbstractInvokableServiceMetadata extends AbstractDependentComponentFactoryMetadata {
-    private final String interfaceName;
-
-    private ListenerRegistration<DOMRpcAvailabilityListener> rpcListenerReg;
-    private RpcConsumerRegistry rpcRegistry;
-    private Class<RpcService> rpcInterface;
-    private Set<QName> rpcSchemaPaths;
-
-    AbstractInvokableServiceMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = requireNonNull(interfaceName);
-    }
-
-    Class<RpcService> rpcInterface() {
-        return rpcInterface;
-    }
-
-    @SuppressWarnings({ "checkstyle:IllegalCatch", "unchecked" })
-    @Override
-    public final void init(final ExtendedBlueprintContainer container) {
-        super.init(container);
-
-        final Class<?> interfaceClass;
-        try {
-            interfaceClass = container().getBundleContext().getBundle().loadClass(interfaceName);
-        } catch (final Exception e) {
-            throw new ComponentDefinitionException(String.format("%s: Error obtaining interface class %s",
-                    logName(), interfaceName), e);
-        }
-
-        if (!RpcService.class.isAssignableFrom(interfaceClass)) {
-            throw new ComponentDefinitionException(String.format(
-                "%s: The specified interface %s is not an RpcService", logName(), interfaceName));
-        }
-
-        rpcInterface = (Class<RpcService>)interfaceClass;
-    }
-
-    @Override
-    protected final void startTracking() {
-        // Request RpcProviderRegistry first ...
-        retrieveService("RpcConsumerRegistry", RpcConsumerRegistry.class, this::onRpcRegistry);
-    }
-
-    private void onRpcRegistry(final Object service) {
-        log.debug("{}: Retrieved RpcProviderRegistry {}", logName(), service);
-        rpcRegistry = (RpcConsumerRegistry)service;
-
-        // Now acquire SchemaService...
-        retrieveService("SchemaService", DOMSchemaService.class, this::onSchemaService);
-    }
-
-    private void onSchemaService(final Object service) {
-        log.debug("{}: Retrieved SchemaService {}", logName(), service);
-
-        // Now get the SchemaContext and trigger RPC resolution
-        retrievedSchemaContext(((DOMSchemaService)service).getGlobalContext());
-    }
-
-    private void retrievedSchemaContext(final SchemaContext schemaContext) {
-        log.debug("{}: retrievedSchemaContext", logName());
-
-        final Collection<QName> schemaPaths = RpcUtil.decomposeRpcService(rpcInterface, schemaContext,
-            rpcFilter());
-        if (schemaPaths.isEmpty()) {
-            log.debug("{}: interface {} has no acceptable entries, assuming it is satisfied", logName(), rpcInterface);
-            setSatisfied();
-            return;
-        }
-
-        rpcSchemaPaths = ImmutableSet.copyOf(schemaPaths);
-        log.debug("{}: Got SchemaPaths: {}", logName(), rpcSchemaPaths);
-
-        // First get the DOMRpcService OSGi service. This will be used to register a listener to be notified
-        // when the underlying DOM RPC service is available.
-        retrieveService("DOMRpcService", DOMRpcService.class, this::retrievedDOMRpcService);
-    }
-
-    private void retrievedDOMRpcService(final Object service) {
-        log.debug("{}: retrievedDOMRpcService {}", logName(), service);
-        final DOMRpcService domRpcService = (DOMRpcService)service;
-
-        setDependencyDesc("Available DOM RPC for binding RPC: " + rpcInterface);
-        rpcListenerReg = domRpcService.registerRpcListener(new DOMRpcAvailabilityListener() {
-            @Override
-            public void onRpcAvailable(final Collection<DOMRpcIdentifier> rpcs) {
-                onRpcsAvailable(rpcs);
-            }
-
-            @Override
-            public void onRpcUnavailable(final Collection<DOMRpcIdentifier> rpcs) {
-            }
-        });
-    }
-
-    abstract Predicate<RpcRoutingStrategy> rpcFilter();
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    @Override
-    public final Object create() throws ComponentDefinitionException {
-        log.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
-        super.onCreate();
-
-        try {
-            RpcService rpcService = rpcRegistry.getRpcService(rpcInterface);
-
-            log.debug("{}: create returning service {}", logName(), rpcService);
-
-            return rpcService;
-        } catch (final RuntimeException e) {
-            throw new ComponentDefinitionException("Error getting RPC service for " + interfaceName, e);
-        }
-    }
-
-    protected final void onRpcsAvailable(final Collection<DOMRpcIdentifier> rpcs) {
-        for (DOMRpcIdentifier identifier : rpcs) {
-            if (rpcSchemaPaths.contains(identifier.getType())) {
-                log.debug("{}: onRpcsAvailable - found SchemaPath {}", logName(), identifier.getType());
-                setSatisfied();
-                break;
-            }
-        }
-    }
-
-    @Override
-    public final void stopTracking() {
-        super.stopTracking();
-        closeRpcListenerReg();
-    }
-
-    private void closeRpcListenerReg() {
-        if (rpcListenerReg != null) {
-            rpcListenerReg.close();
-            rpcListenerReg = null;
-        }
-    }
-
-    @Override
-    public final void destroy(final Object instance) {
-        super.destroy(instance);
-        closeRpcListenerReg();
-    }
-
-    @Override
-    public final String toString() {
-        return MoreObjects.toStringHelper(this).add("id", getId()).add("interfaceName", interfaceName).toString();
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java
deleted file mode 100644 (file)
index cb97fb0..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "action-provider" element that registers the promise to instantiate action
- * instances with RpcProviderRegistry.
- *
- * <p>
- * This bean has two distinct facets:
- * - if a reference bean is provided, it registers it with {@link RpcProviderService}
- * - if a reference bean is not provided, it registers the corresponding no-op implementation with
- *   {@link DOMRpcProviderService} for all action (Routed RPC) elements in the provided interface
- *
- * @author Robert Varga
- */
-public class ActionProviderBean {
-    static final String ACTION_PROVIDER = "action-provider";
-
-    private static final Logger LOG = LoggerFactory.getLogger(ActionProviderBean.class);
-
-    private DOMRpcProviderService domRpcProvider;
-    private RpcProviderService bindingRpcProvider;
-    private DOMSchemaService schemaService;
-    private RpcService implementation;
-    private String interfaceName;
-    private Registration reg;
-    private Bundle bundle;
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void setInterfaceName(final String interfaceName) {
-        this.interfaceName = interfaceName;
-    }
-
-    public void setImplementation(final RpcService implementation) {
-        this.implementation = implementation;
-    }
-
-    public void setDomRpcProvider(final DOMRpcProviderService rpcProviderService) {
-        this.domRpcProvider = rpcProviderService;
-    }
-
-    public void setBindingRpcProvider(final RpcProviderService rpcProvider) {
-        this.bindingRpcProvider = rpcProvider;
-    }
-
-    public void setSchemaService(final DOMSchemaService schemaService) {
-        this.schemaService = schemaService;
-    }
-
-    public void init() {
-        // First resolve the interface class
-        final Class<RpcService> interfaceClass = getRpcClass();
-
-        LOG.debug("{}: resolved interface {} to {}", ACTION_PROVIDER, interfaceName, interfaceClass);
-
-        if (implementation != null) {
-            registerImplementation(interfaceClass);
-        } else {
-            registerFallback(interfaceClass);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void destroy() {
-        if (reg != null) {
-            try {
-                reg.close();
-            } catch (final Exception e) {
-                LOG.warn("{}: error while unregistering", ACTION_PROVIDER, e);
-            } finally {
-                reg = null;
-            }
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private Class<RpcService> getRpcClass() {
-        final Class<?> iface;
-
-        try {
-            iface = bundle.loadClass(interfaceName);
-        } catch (final ClassNotFoundException e) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" for %s \"%s\" does not refer to an available class", interfaceName,
-                ACTION_PROVIDER), e);
-        }
-        if (!RpcService.class.isAssignableFrom(iface)) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" %s for \"%s\" is not an RpcService", interfaceName, ACTION_PROVIDER));
-        }
-
-        return (Class<RpcService>) iface;
-    }
-
-    private void registerFallback(final Class<RpcService> interfaceClass) {
-        final Collection<QName> paths = RpcUtil.decomposeRpcService(interfaceClass,
-            schemaService.getGlobalContext(), RpcRoutingStrategy::isContextBasedRouted);
-        if (paths.isEmpty()) {
-            LOG.warn("{}: interface {} has no actions defined", ACTION_PROVIDER, interfaceClass);
-            return;
-        }
-
-        final Set<DOMRpcIdentifier> rpcs = ImmutableSet.copyOf(Collections2.transform(paths, DOMRpcIdentifier::create));
-        reg = domRpcProvider.registerRpcImplementation(
-            (rpc, input) -> FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException(
-                "Action %s has no instance matching %s", rpc, input)), rpcs);
-        LOG.debug("Registered provider for {}", interfaceName);
-    }
-
-    private void registerImplementation(final Class<RpcService> interfaceClass) {
-        if (!interfaceClass.isInstance(implementation)) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
-                interfaceName, ACTION_PROVIDER, implementation.getClass()));
-        }
-
-        reg = bindingRpcProvider.registerRpcImplementation(interfaceClass, implementation);
-        LOG.debug("Registered implementation {} for {}", implementation, interfaceName);
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java
deleted file mode 100644 (file)
index 5bb3f14..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "action-service" element. It waits for a DOM promise of registration
- * to appear in the {@link DOMRpcService} and then acquires a dynamic proxy via RpcProviderRegistry.
- *
- * @author Robert Varga
- */
-final class ActionServiceMetadata extends AbstractInvokableServiceMetadata {
-    /*
-     * Implementation note:
-     *
-     * This implementation assumes Binding V1 semantics for actions, which means actions are packaged along with RPCs
-     * into a single interface. This has interesting implications on working with RpcServiceMetadata, which only
-     * handles the RPC side of the contract.
-     *
-     * Further interesting interactions stem from the fact that in DOM world each action is a separate entity, so the
-     * interface contract can let some actions to be invoked, while failing for others. This is a shortcoming of the
-     * Binding Specification and will be addressed in Binding V2 -- where each action is its own interface.
-     */
-    ActionServiceMetadata(final String id, final String interfaceName) {
-        super(id, interfaceName);
-    }
-
-    @Override
-    Predicate<RpcRoutingStrategy> rpcFilter() {
-        return RpcRoutingStrategy::isContextBasedRouted;
-    }
-}
index dd672e411081ba2e16fbaa7504cdf704b4c888eb..67905aeaf86b3ebcb8f8d1c58cc9fce5549584df 100644 (file)
@@ -15,23 +15,25 @@ import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.net.URISyntaxException;
 import java.util.Set;
-import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLStreamException;
 import javax.xml.transform.dom.DOMSource;
 import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
 import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.Identifier;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Key;
+import org.opendaylight.yangtools.yang.binding.KeyAware;
+import org.opendaylight.yangtools.yang.binding.contract.Naming;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
 import org.opendaylight.yangtools.yang.data.codec.xml.XmlParserStream;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.NormalizedNodeResult;
+import org.opendaylight.yangtools.yang.data.impl.schema.NormalizationResultHolder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
@@ -48,11 +50,9 @@ import org.xml.sax.SAXException;
  * @author Thomas Pantelis (originally; re-factored by Michael Vorburger.ch)
  */
 public abstract class BindingContext {
-    private static String GET_KEY_METHOD = "key";
-
     public static BindingContext create(final String logName, final Class<? extends DataObject> klass,
             final String appConfigListKeyValue) {
-        if (Identifiable.class.isAssignableFrom(klass)) {
+        if (KeyAware.class.isAssignableFrom(klass)) {
             // The binding class corresponds to a yang list.
             if (Strings.isNullOrEmpty(appConfigListKeyValue)) {
                 throw new ComponentDefinitionException(String.format(
@@ -75,12 +75,12 @@ public abstract class BindingContext {
     }
 
     public final InstanceIdentifier<DataObject> appConfigPath;
-    public final Class<DataObject> appConfigBindingClass;
+    public final Class<?> appConfigBindingClass;
     public final Class<? extends DataSchemaNode> schemaType;
     public final QName bindingQName;
 
-    private BindingContext(final Class<DataObject> appConfigBindingClass,
-            final InstanceIdentifier<DataObject> appConfigPath, final Class<? extends DataSchemaNode> schemaType) {
+    private BindingContext(final Class<?> appConfigBindingClass, final InstanceIdentifier<DataObject> appConfigPath,
+            final Class<? extends DataSchemaNode> schemaType) {
         this.appConfigBindingClass = appConfigBindingClass;
         this.appConfigPath = appConfigPath;
         this.schemaType = schemaType;
@@ -89,20 +89,14 @@ public abstract class BindingContext {
     }
 
     public NormalizedNode parseDataElement(final Element element, final SchemaTreeInference dataSchema)
-            throws XMLStreamException, IOException, ParserConfigurationException, SAXException, URISyntaxException {
-        final NormalizedNodeResult resultHolder = new NormalizedNodeResult();
+            throws XMLStreamException, IOException, SAXException, URISyntaxException {
+        final NormalizationResultHolder resultHolder = new NormalizationResultHolder();
         final NormalizedNodeStreamWriter writer = ImmutableNormalizedNodeStreamWriter.from(resultHolder);
         final XmlParserStream xmlParser = XmlParserStream.create(writer, dataSchema);
         xmlParser.traverse(new DOMSource(element));
 
-        final NormalizedNode result = resultHolder.getResult();
-        if (result instanceof MapNode) {
-            final MapNode mapNode = (MapNode) result;
-            final MapEntryNode mapEntryNode = mapNode.body().iterator().next();
-            return mapEntryNode;
-        }
-
-        return result;
+        final NormalizedNode result = resultHolder.getResult().data();
+        return result instanceof MapNode mapNode ? mapNode.body().iterator().next() : result;
     }
 
     public abstract NormalizedNode newDefaultNode(SchemaTreeInference dataSchema);
@@ -113,13 +107,13 @@ public abstract class BindingContext {
     private static class ContainerBindingContext extends BindingContext {
         @SuppressWarnings("unchecked")
         ContainerBindingContext(final Class<? extends DataObject> appConfigBindingClass) {
-            super((Class<DataObject>) appConfigBindingClass,
-                    InstanceIdentifier.create((Class<DataObject>) appConfigBindingClass), ContainerSchemaNode.class);
+            super(appConfigBindingClass, InstanceIdentifier.create((Class) appConfigBindingClass),
+                ContainerSchemaNode.class);
         }
 
         @Override
-        public NormalizedNode newDefaultNode(final SchemaTreeInference dataSchema) {
-            return ImmutableNodes.containerNode(bindingQName);
+        public ContainerNode newDefaultNode(final SchemaTreeInference dataSchema) {
+            return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(bindingQName)).build();
         }
     }
 
@@ -142,8 +136,8 @@ public abstract class BindingContext {
                 final String listKeyValue) throws InstantiationException, IllegalAccessException,
                 IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {
             // We assume the yang list key type is string.
-            Identifier keyInstance = (Identifier) bindingClass.getMethod(GET_KEY_METHOD).getReturnType()
-                    .getConstructor(String.class).newInstance(listKeyValue);
+            Key keyInstance = (Key) bindingClass.getMethod(Naming.KEY_AWARE_KEY_NAME)
+                .getReturnType().getConstructor(String.class).newInstance(listKeyValue);
             InstanceIdentifier appConfigPath = InstanceIdentifier.builder((Class)bindingClass, keyInstance).build();
             return new ListBindingContext(bindingClass, appConfigPath, listKeyValue);
         }
@@ -158,7 +152,10 @@ public abstract class BindingContext {
 
             checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass);
             QName listKeyQName = keys.iterator().next();
-            return ImmutableNodes.mapEntryBuilder(bindingQName, listKeyQName, appConfigListKeyValue).build();
+            return ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(bindingQName, listKeyQName, appConfigListKeyValue))
+                .withChild(ImmutableNodes.leafNode(listKeyQName, appConfigListKeyValue))
+                .build();
         }
     }
 }
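ListBindingContext.newDefaultNode() above now assembles the default list entry through the yang-data-spi ImmutableNodes builders rather than the removed mapEntryBuilder() shorthand. A hedged sketch of that construction follows; the QNames and the key value are invented only to make the snippet compile on its own.

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

final class MapEntrySketch {
    // Produces a single-key map entry: the key value appears both in the entry's
    // identifier predicate and as the matching leaf child, as in the patch above.
    static MapEntryNode defaultEntry() {
        final QName list = QName.create("urn:example:app-config", "widget");
        final QName key = QName.create("urn:example:app-config", "name");
        return ImmutableNodes.newMapEntryBuilder()
            .withNodeIdentifier(NodeIdentifierWithPredicates.of(list, key, "default"))
            .withChild(ImmutableNodes.leafNode(key, "default"))
            .build();
    }
}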
index 2f0709f6af8fae04096bbb963c2baf5134d89134..8e8d98ff365d62389aba90459f7abe0ccc6601ee 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.blueprint.ext;
 import com.google.common.base.Strings;
 import java.util.ArrayList;
 import java.util.Dictionary;
-import java.util.Hashtable;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.aries.blueprint.ComponentDefinitionRegistry;
@@ -20,13 +20,11 @@ import org.apache.aries.blueprint.ext.AbstractPropertyPlaceholder;
 import org.apache.aries.blueprint.mutable.MutableBeanMetadata;
 import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata;
 import org.apache.aries.util.AriesFrameworkUtil;
-import org.gaul.modernizer_maven_annotations.SuppressModernizer;
 import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
 import org.osgi.framework.Bundle;
 import org.osgi.framework.Constants;
+import org.osgi.framework.FrameworkUtil;
 import org.osgi.framework.ServiceRegistration;
-import org.osgi.service.blueprint.reflect.BeanProperty;
-import org.osgi.service.blueprint.reflect.ComponentMetadata;
 import org.osgi.service.blueprint.reflect.ValueMetadata;
 import org.osgi.service.cm.ManagedService;
 import org.slf4j.Logger;
@@ -45,8 +43,8 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     private static final String CM_PERSISTENT_ID_PROPERTY = "persistentId";
 
     private final List<ServiceRegistration<?>> managedServiceRegs = new ArrayList<>();
-    private Bundle bundle;
-    private BlueprintContainerRestartService blueprintContainerRestartService;
+    private Bundle bundle = null;
+    private BlueprintContainerRestartService blueprintContainerRestartService = null;
     private boolean restartDependentsOnUpdates;
     private boolean useDefaultForReferenceTypes;
 
@@ -55,7 +53,7 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     }
 
     public void setBlueprintContainerRestartService(final BlueprintContainerRestartService restartService) {
-        this.blueprintContainerRestartService = restartService;
+        blueprintContainerRestartService = restartService;
     }
 
     public void setRestartDependentsOnUpdates(final boolean restartDependentsOnUpdates) {
@@ -67,21 +65,19 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     }
 
     public void destroy() {
-        for (ServiceRegistration<?> reg: managedServiceRegs) {
-            AriesFrameworkUtil.safeUnregisterService(reg);
-        }
+        managedServiceRegs.forEach(AriesFrameworkUtil::safeUnregisterService);
     }
 
     @Override
     public void process(final ComponentDefinitionRegistry registry) {
         LOG.debug("{}: In process",  logName());
 
-        for (String name : registry.getComponentDefinitionNames()) {
-            ComponentMetadata component = registry.getComponentDefinition(name);
-            if (component instanceof MutableBeanMetadata) {
-                processMutableBeanMetadata((MutableBeanMetadata) component);
-            } else if (component instanceof MutableServiceReferenceMetadata) {
-                processServiceReferenceMetadata((MutableServiceReferenceMetadata)component);
+        for (var name : registry.getComponentDefinitionNames()) {
+            final var component = registry.getComponentDefinition(name);
+            if (component instanceof MutableBeanMetadata bean) {
+                processMutableBeanMetadata(bean);
+            } else if (component instanceof MutableServiceReferenceMetadata serviceRef) {
+                processServiceReferenceMetadata(serviceRef);
             }
         }
     }
@@ -112,18 +108,15 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
             LOG.debug("{}: Found PropertyPlaceholder bean: {}, runtime {}", logName(), bean.getId(),
                     bean.getRuntimeClass());
 
-            for (BeanProperty prop : bean.getProperties()) {
+            for (var prop : bean.getProperties()) {
                 if (CM_PERSISTENT_ID_PROPERTY.equals(prop.getName())) {
-                    if (prop.getValue() instanceof ValueMetadata) {
-                        ValueMetadata persistentId = (ValueMetadata)prop.getValue();
-
-                        LOG.debug("{}: Found {} property, value : {}", logName(),
-                                CM_PERSISTENT_ID_PROPERTY, persistentId.getStringValue());
-
+                    if (prop.getValue() instanceof ValueMetadata persistentId) {
+                        LOG.debug("{}: Found {} property, value : {}", logName(), CM_PERSISTENT_ID_PROPERTY,
+                            persistentId.getStringValue());
                         registerManagedService(persistentId.getStringValue());
                     } else {
-                        LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata",
-                                logName(), CM_PERSISTENT_ID_PROPERTY, prop.getValue());
+                        LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata", logName(),
+                            CM_PERSISTENT_ID_PROPERTY, prop.getValue());
                     }
 
                     break;
@@ -132,11 +125,10 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
         }
     }
 
-    @SuppressModernizer
     private void registerManagedService(final String persistentId) {
         // Register a ManagedService so we get updates from the ConfigAdmin when the cfg file corresponding
         // to the persistentId changes.
-        final ManagedService managedService = new ManagedService() {
+        final var managedService = new ManagedService() {
             private final AtomicBoolean initialUpdate = new AtomicBoolean(true);
             private volatile Dictionary<String, ?> previousProperties;
 
@@ -156,11 +148,11 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
             }
         };
 
-        Dictionary<String, Object> props = new Hashtable<>();
-        props.put(Constants.SERVICE_PID, persistentId);
-        props.put(Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName());
-        props.put(Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION));
-        managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService, props));
+        managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService,
+            FrameworkUtil.asDictionary(Map.of(
+                Constants.SERVICE_PID, persistentId,
+                Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName(),
+                Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION)))));
     }
 
     private String logName() {
index c1474929551c25e64f4998f926ee0b4e0407b29d..4dea3404f9f073677541678671786e7627f9293b 100644 (file)
@@ -14,7 +14,6 @@ import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.Optional;
-import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLStreamException;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
@@ -54,7 +53,7 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
     @FunctionalInterface
     public interface FallbackConfigProvider {
         NormalizedNode get(SchemaTreeInference dataSchema)
-            throws IOException, XMLStreamException, ParserConfigurationException, SAXException, URISyntaxException;
+            throws IOException, XMLStreamException, SAXException, URISyntaxException;
     }
 
     @FunctionalInterface
@@ -93,8 +92,8 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
         return Resources.getResource(testClass, defaultAppConfigFileName);
     }
 
-    public T createDefaultInstance() throws ConfigXMLReaderException, ParserConfigurationException, XMLStreamException,
-            IOException, SAXException, URISyntaxException {
+    public T createDefaultInstance() throws ConfigXMLReaderException, XMLStreamException, IOException, SAXException,
+            URISyntaxException {
         return createDefaultInstance(dataSchema -> {
             throw new IllegalArgumentException(
                 "Failed to read XML (not creating model from defaults as runtime would, for better clarity in tests)");
@@ -103,7 +102,7 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
 
     @SuppressWarnings("unchecked")
     public T createDefaultInstance(final FallbackConfigProvider fallback) throws ConfigXMLReaderException,
-            URISyntaxException, ParserConfigurationException, XMLStreamException, SAXException, IOException {
+            URISyntaxException, XMLStreamException, SAXException, IOException {
         YangInstanceIdentifier yangPath = bindingSerializer.toYangInstanceIdentifier(bindingContext.appConfigPath);
 
         LOG.debug("{}: Creating app config instance from path {}, Qname: {}", logName, yangPath,
@@ -176,7 +175,7 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
         if (!optionalURL.isPresent()) {
             return null;
         }
-        URL url = optionalURL.get();
+        URL url = optionalURL.orElseThrow();
         try (InputStream is = url.openStream()) {
             Document root = UntrustedXML.newDocumentBuilder().parse(is);
             NormalizedNode dataNode = bindingContext.parseDataElement(root.getDocumentElement(),
@@ -185,8 +184,7 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
             LOG.debug("{}: Parsed data node: {}", logName, dataNode);
 
             return dataNode;
-        } catch (final IOException | SAXException | XMLStreamException | ParserConfigurationException
-                | URISyntaxException e) {
+        } catch (final IOException | SAXException | XMLStreamException | URISyntaxException e) {
             String msg = String.format("%s: Could not read/parse app config %s", logName, url);
             LOG.error(msg, e);
             throw new ConfigXMLReaderException(msg, e);
index 5b20827c7fdadd217219ee660a89df6fd5bda919..35cdf03e4c69e2b747f059a9f3e2c6cc81658525 100644 (file)
@@ -17,13 +17,11 @@ import java.util.Collection;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
-import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLStreamException;
 import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader.ConfigURLProvider;
-import org.opendaylight.mdsal.binding.api.ClusteredDataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.DataObjectModification;
 import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
@@ -33,7 +31,8 @@ import org.opendaylight.mdsal.binding.api.ReadTransaction;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
@@ -70,7 +69,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
     private final AtomicBoolean readingInitialAppConfig = new AtomicBoolean(true);
 
     private volatile BindingContext bindingContext;
-    private volatile ListenerRegistration<?> appConfigChangeListenerReg;
+    private volatile Registration appConfigChangeListenerReg;
     private volatile DataObject currentAppConfig;
 
     // Note: the BindingNormalizedNodeSerializer interface is annotated as deprecated because there's an
@@ -86,7 +85,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         this.defaultAppConfigFileName = defaultAppConfigFileName;
         this.appConfigBindingClassName = appConfigBindingClassName;
         this.appConfigListKeyValue = appConfigListKeyValue;
-        this.appConfigUpdateStrategy = updateStrategyValue;
+        appConfigUpdateStrategy = updateStrategyValue;
     }
 
     @Override
@@ -97,10 +96,10 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         Class<DataObject> appConfigBindingClass;
         try {
             Class<?> bindingClass = container.getBundleContext().getBundle().loadClass(appConfigBindingClassName);
-            if (!DataObject.class.isAssignableFrom(bindingClass)) {
+            if (!ChildOf.class.isAssignableFrom(bindingClass)) {
                 throw new ComponentDefinitionException(String.format(
                         "%s: Specified app config binding class %s does not extend %s",
-                        logName(), appConfigBindingClassName, DataObject.class.getName()));
+                        logName(), appConfigBindingClassName, ChildOf.class.getName()));
             }
 
             appConfigBindingClass = (Class<DataObject>) bindingClass;
@@ -143,15 +142,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
 
         setDependencyDesc("Initial app config " + bindingContext.appConfigBindingClass.getSimpleName());
 
-        // We register a DTCL to get updates and also read the app config data from the data store. If
-        // the app config data is present then both the read and initial DTCN update will return it. If the
-        // the data isn't present, we won't get an initial DTCN update so the read will indicate the data
-        // isn't present.
-
-        DataTreeIdentifier<DataObject> dataTreeId = DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION,
-                bindingContext.appConfigPath);
-        appConfigChangeListenerReg = dataBroker.registerDataTreeChangeListener(dataTreeId,
-                (ClusteredDataTreeChangeListener<DataObject>) this::onAppConfigChanged);
+        // We register a DTCL to get updates and also read the app config data from the data store. If the app config
+        // data is present then both the read and initial DTCN update will return it. If the data isn't present, we
+        // will not get an initial DTCN update so the read will indicate the data is not present.
+        appConfigChangeListenerReg = dataBroker.registerTreeChangeListener(
+            DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, bindingContext.appConfigPath),
+            this::onAppConfigChanged);
 
         readInitialAppConfig(dataBroker);
     }
@@ -187,12 +183,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
     private void onAppConfigChanged(final Collection<DataTreeModification<DataObject>> changes) {
         for (DataTreeModification<DataObject> change: changes) {
             DataObjectModification<DataObject> changeRoot = change.getRootNode();
-            ModificationType type = changeRoot.getModificationType();
+            ModificationType type = changeRoot.modificationType();
 
             LOG.debug("{}: onAppConfigChanged: {}, {}", logName(), type, change.getRootPath());
 
             if (type == ModificationType.SUBTREE_MODIFIED || type == ModificationType.WRITE) {
-                DataObject newAppConfig = changeRoot.getDataAfter();
+                DataObject newAppConfig = changeRoot.dataAfter();
 
                 LOG.debug("New app config instance: {}, previous: {}", newAppConfig, currentAppConfig);
 
@@ -219,7 +215,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         if (result) {
             DataObject localAppConfig;
             if (possibleAppConfig.isPresent()) {
-                localAppConfig = possibleAppConfig.get();
+                localAppConfig = possibleAppConfig.orElseThrow();
             } else {
                 // No app config data is present so create an empty instance via the bindingSerializer service.
                 // This will also return default values for leafs that haven't been explicitly set.
@@ -267,8 +263,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
                 }
             });
 
-        } catch (final ConfigXMLReaderException | IOException | SAXException | XMLStreamException
-                | ParserConfigurationException | URISyntaxException e) {
+        } catch (ConfigXMLReaderException | IOException | SAXException | XMLStreamException | URISyntaxException e) {
             if (e.getCause() == null) {
                 setFailureMessage(e.getMessage());
             } else {
@@ -279,7 +274,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
     }
 
     private @Nullable NormalizedNode parsePossibleDefaultAppConfigElement(final SchemaTreeInference dataSchema)
-            throws URISyntaxException, IOException, ParserConfigurationException, SAXException, XMLStreamException {
+            throws URISyntaxException, IOException, SAXException, XMLStreamException {
         if (defaultAppConfigElement == null) {
             return null;
         }
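The startTracking() rewrite above drops the ClusteredDataTreeChangeListener cast and registers the listener through DataBroker.registerTreeChangeListener() with a DataTreeIdentifier.of() path, keeping the returned Registration for later close(). A minimal sketch of that pattern follows, kept generic over the binding type because the real app-config class is not part of this hunk.

import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class TreeChangeRegistrationSketch {
    // The caller keeps the Registration and close()s it on shutdown, mirroring
    // how appConfigChangeListenerReg is handled in DataStoreAppConfigMetadata.
    static <T extends DataObject> Registration watchConfig(final DataBroker broker,
            final InstanceIdentifier<T> path, final DataTreeChangeListener<T> listener) {
        return broker.registerTreeChangeListener(
            DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path), listener);
    }
}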
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java
deleted file mode 100644 (file)
index 74c2956..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.osgi.framework.Bundle;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "notification-listener" element that registers a NotificationListener
- * with the NotificationService.
- *
- * @author Thomas Pantelis
- */
-public class NotificationListenerBean {
-    private static final Logger LOG = LoggerFactory.getLogger(NotificationListenerBean.class);
-    static final String NOTIFICATION_LISTENER = "notification-listener";
-
-    private Bundle bundle;
-    private NotificationService notificationService;
-    private NotificationListener notificationListener;
-    private ListenerRegistration<?> registration;
-
-    public void setNotificationService(final NotificationService notificationService) {
-        this.notificationService = notificationService;
-    }
-
-    public void setNotificationListener(final NotificationListener notificationListener) {
-        this.notificationListener = notificationListener;
-    }
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void init() {
-        LOG.debug("{}: init - registering NotificationListener {}", bundle.getSymbolicName(), notificationListener);
-
-        registration = notificationService.registerNotificationListener(notificationListener);
-    }
-
-    public void destroy() {
-        if (registration != null) {
-            LOG.debug("{}: destroy - closing ListenerRegistration {}", bundle.getSymbolicName(), notificationListener);
-            registration.close();
-        } else {
-            LOG.debug("{}: destroy - listener was not registered", bundle.getSymbolicName());
-        }
-    }
-}
index af26acae0a16e1d7fd1a40d67181b8f2f682a51b..371b7efecdec1fa6e67f700acce75e10aa4beebd 100644 (file)
@@ -11,12 +11,10 @@ import com.google.common.base.Strings;
 import java.io.IOException;
 import java.io.StringReader;
 import java.net.URL;
-import java.util.Collections;
 import java.util.Set;
 import org.apache.aries.blueprint.ComponentDefinitionRegistry;
 import org.apache.aries.blueprint.NamespaceHandler;
 import org.apache.aries.blueprint.ParserContext;
-import org.apache.aries.blueprint.ext.ComponentFactoryMetadata;
 import org.apache.aries.blueprint.mutable.MutableBeanMetadata;
 import org.apache.aries.blueprint.mutable.MutableRefMetadata;
 import org.apache.aries.blueprint.mutable.MutableReferenceMetadata;
@@ -24,10 +22,6 @@ import org.apache.aries.blueprint.mutable.MutableServiceMetadata;
 import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata;
 import org.apache.aries.blueprint.mutable.MutableValueMetadata;
 import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.osgi.service.blueprint.container.ComponentDefinitionException;
 import org.osgi.service.blueprint.reflect.BeanMetadata;
@@ -54,32 +48,20 @@ import org.xml.sax.SAXException;
  */
 public final class OpendaylightNamespaceHandler implements NamespaceHandler {
     public static final String NAMESPACE_1_0_0 = "http://opendaylight.org/xmlns/blueprint/v1.0.0";
-    static final String ROUTED_RPC_REG_CONVERTER_NAME = "org.opendaylight.blueprint.RoutedRpcRegConverter";
-    static final String DOM_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.DOMRpcProviderService";
-    static final String RPC_REGISTRY_NAME = "org.opendaylight.blueprint.RpcRegistry";
-    static final String BINDING_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.RpcProviderService";
-    static final String SCHEMA_SERVICE_NAME = "org.opendaylight.blueprint.SchemaService";
-    static final String NOTIFICATION_SERVICE_NAME = "org.opendaylight.blueprint.NotificationService";
-    static final String TYPE_ATTR = "type";
-    static final String UPDATE_STRATEGY_ATTR = "update-strategy";
 
     private static final Logger LOG = LoggerFactory.getLogger(OpendaylightNamespaceHandler.class);
+    private static final String TYPE_ATTR = "type";
+    private static final String UPDATE_STRATEGY_ATTR = "update-strategy";
     private static final String COMPONENT_PROCESSOR_NAME = ComponentProcessor.class.getName();
     private static final String RESTART_DEPENDENTS_ON_UPDATES = "restart-dependents-on-updates";
     private static final String USE_DEFAULT_FOR_REFERENCE_TYPES = "use-default-for-reference-types";
     private static final String CLUSTERED_APP_CONFIG = "clustered-app-config";
-    private static final String INTERFACE = "interface";
-    private static final String REF_ATTR = "ref";
     private static final String ID_ATTR = "id";
-    private static final String RPC_SERVICE = "rpc-service";
-    private static final String ACTION_SERVICE = "action-service";
-    private static final String SPECIFIC_SERVICE_REF_LIST = "specific-reference-list";
-    private static final String STATIC_REFERENCE = "static-reference";
 
     @SuppressWarnings("rawtypes")
     @Override
     public Set<Class> getManagedClasses() {
-        return Collections.emptySet();
+        return Set.of();
     }
 
     @Override
@@ -97,22 +79,8 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
     public Metadata parse(final Element element, final ParserContext context) {
         LOG.debug("In parse for {}", element);
 
-        if (nodeNameEquals(element, RpcImplementationBean.RPC_IMPLEMENTATION)) {
-            return parseRpcImplementation(element, context);
-        } else if (nodeNameEquals(element, RPC_SERVICE)) {
-            return parseRpcService(element, context);
-        } else if (nodeNameEquals(element, NotificationListenerBean.NOTIFICATION_LISTENER)) {
-            return parseNotificationListener(element, context);
-        } else if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
+        if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
             return parseClusteredAppConfig(element, context);
-        } else if (nodeNameEquals(element, SPECIFIC_SERVICE_REF_LIST)) {
-            return parseSpecificReferenceList(element, context);
-        } else if (nodeNameEquals(element, STATIC_REFERENCE)) {
-            return parseStaticReference(element, context);
-        } else if (nodeNameEquals(element, ACTION_SERVICE)) {
-            return parseActionService(element, context);
-        } else if (nodeNameEquals(element, ActionProviderBean.ACTION_PROVIDER)) {
-            return parseActionProvider(element, context);
         }
 
         throw new ComponentDefinitionException("Unsupported standalone element: " + element.getNodeName());
@@ -144,12 +112,10 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
 
     private static ComponentMetadata decorateServiceType(final Attr attr, final ComponentMetadata component,
             final ParserContext context) {
-        if (!(component instanceof MutableServiceMetadata)) {
+        if (!(component instanceof MutableServiceMetadata service)) {
             throw new ComponentDefinitionException("Expected an instanceof MutableServiceMetadata");
         }
 
-        MutableServiceMetadata service = (MutableServiceMetadata)component;
-
         LOG.debug("decorateServiceType for {} - adding type property {}", service.getId(), attr.getValue());
 
         service.addServiceProperty(createValue(context, TYPE_ATTR), createValue(context, attr.getValue()));
@@ -235,107 +201,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
         return metadata;
     }
 
-    private static Metadata parseActionProvider(final Element element, final ParserContext context) {
-        registerDomRpcProviderServiceRefBean(context);
-        registerBindingRpcProviderServiceRefBean(context);
-        registerSchemaServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), ActionProviderBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("domRpcProvider", createRef(context, DOM_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("bindingRpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("schemaService", createRef(context, SCHEMA_SERVICE_NAME));
-        metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
-
-        if (element.hasAttribute(REF_ATTR)) {
-            metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
-        }
-
-        LOG.debug("parseActionProvider returning {}", metadata);
-        return metadata;
-    }
-
-
-    private static Metadata parseRpcImplementation(final Element element, final ParserContext context) {
-        registerBindingRpcProviderServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), RpcImplementationBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("rpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
-
-        if (element.hasAttribute(INTERFACE)) {
-            metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
-        }
-
-        LOG.debug("parseRpcImplementation returning {}", metadata);
-        return metadata;
-    }
-
-    private static Metadata parseActionService(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new ActionServiceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseActionService returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static Metadata parseRpcService(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new RpcServiceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseRpcService returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static void registerDomRpcProviderServiceRefBean(final ParserContext context) {
-        registerRefBean(context, DOM_RPC_PROVIDER_SERVICE_NAME, DOMRpcProviderService.class);
-    }
-
-    private static void registerBindingRpcProviderServiceRefBean(final ParserContext context) {
-        registerRefBean(context, BINDING_RPC_PROVIDER_SERVICE_NAME, RpcProviderService.class);
-    }
-
-    private static void registerSchemaServiceRefBean(final ParserContext context) {
-        registerRefBean(context, SCHEMA_SERVICE_NAME, DOMSchemaService.class);
-    }
-
-    private static void registerRefBean(final ParserContext context, final String name, final Class<?> clazz) {
-        ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
-        if (registry.getComponentDefinition(name) == null) {
-            MutableReferenceMetadata metadata = createServiceRef(context, clazz, null);
-            metadata.setId(name);
-            registry.registerComponentDefinition(metadata);
-        }
-    }
-
-    private static Metadata parseNotificationListener(final Element element, final ParserContext context) {
-        registerNotificationServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), NotificationListenerBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("notificationService", createRef(context, NOTIFICATION_SERVICE_NAME));
-        metadata.addProperty("notificationListener", createRef(context, element.getAttribute(REF_ATTR)));
-
-        LOG.debug("parseNotificationListener returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static void registerNotificationServiceRefBean(final ParserContext context) {
-        ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
-        if (registry.getComponentDefinition(NOTIFICATION_SERVICE_NAME) == null) {
-            MutableReferenceMetadata metadata = createServiceRef(context, NotificationService.class, null);
-            metadata.setId(NOTIFICATION_SERVICE_NAME);
-            registry.registerComponentDefinition(metadata);
-        }
-    }
-
     private static Metadata parseClusteredAppConfig(final Element element, final ParserContext context) {
         LOG.debug("parseClusteredAppConfig");
 
@@ -383,24 +248,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
         }
     }
 
-    private static Metadata parseSpecificReferenceList(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new SpecificReferenceListMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseSpecificReferenceList returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static Metadata parseStaticReference(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new StaticReferenceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseStaticReference returning {}", metadata);
-
-        return metadata;
-    }
-
     private static Element parseXML(final String name, final String xml) {
         try {
             return UntrustedXML.newDocumentBuilder().parse(new InputSource(new StringReader(xml))).getDocumentElement();
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java
deleted file mode 100644 (file)
index 94d5b3b..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.base.Strings;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "rpc-implementation" element that registers an RPC implementation with
- * the RpcProviderRegistry.
- *
- * @author Thomas Pantelis
- */
-public class RpcImplementationBean {
-    private static final Logger LOG = LoggerFactory.getLogger(RpcImplementationBean.class);
-    static final String RPC_IMPLEMENTATION = "rpc-implementation";
-
-    private RpcProviderService rpcProvider;
-    private Bundle bundle;
-    private String interfaceName;
-    private RpcService implementation;
-    private final List<ObjectRegistration<RpcService>> rpcRegistrations = new ArrayList<>();
-
-    public void setRpcProvider(final RpcProviderService rpcProvider) {
-        this.rpcProvider = rpcProvider;
-    }
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void setInterfaceName(final String interfaceName) {
-        this.interfaceName = interfaceName;
-    }
-
-    public void setImplementation(final RpcService implementation) {
-        this.implementation = implementation;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void init() {
-        try {
-            List<Class<RpcService>> rpcInterfaces = getImplementedRpcServiceInterfaces(interfaceName,
-                    implementation.getClass(), bundle, RPC_IMPLEMENTATION);
-
-            LOG.debug("{}: init - adding implementation {} for RpcService interface(s) {}", bundle.getSymbolicName(),
-                    implementation, rpcInterfaces);
-
-            for (Class<RpcService> rpcInterface : rpcInterfaces) {
-                rpcRegistrations.add(rpcProvider.registerRpcImplementation(rpcInterface, implementation));
-            }
-        } catch (final ComponentDefinitionException e) {
-            throw e;
-        } catch (final Exception e) {
-            throw new ComponentDefinitionException(String.format(
-                    "Error processing \"%s\" for %s", RPC_IMPLEMENTATION, implementation.getClass()), e);
-        }
-    }
-
-    public void destroy() {
-        for (ObjectRegistration<RpcService> reg: rpcRegistrations) {
-            reg.close();
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    static List<Class<RpcService>> getImplementedRpcServiceInterfaces(final String interfaceName,
-            final Class<?> implementationClass, final Bundle bundle, final String logName)
-            throws ClassNotFoundException {
-        if (!Strings.isNullOrEmpty(interfaceName)) {
-            Class<?> rpcInterface = bundle.loadClass(interfaceName);
-
-            if (!rpcInterface.isAssignableFrom(implementationClass)) {
-                throw new ComponentDefinitionException(String.format(
-                        "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
-                        interfaceName, logName, implementationClass));
-            }
-
-            return Collections.singletonList((Class<RpcService>)rpcInterface);
-        }
-
-        List<Class<RpcService>> rpcInterfaces = new ArrayList<>();
-        for (Class<?> intface : implementationClass.getInterfaces()) {
-            if (RpcService.class.isAssignableFrom(intface)) {
-                rpcInterfaces.add((Class<RpcService>) intface);
-            }
-        }
-
-        if (rpcInterfaces.isEmpty()) {
-            throw new ComponentDefinitionException(String.format(
-                    "The \"ref\" instance %s for \"%s\" does not implement any RpcService interfaces",
-                    implementationClass, logName));
-        }
-
-        return rpcInterfaces;
-    }
-}
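
The RpcImplementationBean deleted above resolved the RpcService interfaces to register either from an explicit interface name or by scanning the implementation class's interfaces. A rough standalone sketch of the scanning half, assuming a hypothetical Marker interface in place of RpcService:

import java.util.ArrayList;
import java.util.List;

final class MarkerInterfaceScan {
    /** Hypothetical marker interface standing in for RpcService. */
    interface Marker {
    }

    /** Returns every interface of the class that is assignable to the marker type. */
    static List<Class<?>> implementedMarkerInterfaces(final Class<?> implementationClass) {
        final List<Class<?>> found = new ArrayList<>();
        for (Class<?> itf : implementationClass.getInterfaces()) {
            if (Marker.class.isAssignableFrom(itf)) {
                found.add(itf);
            }
        }
        if (found.isEmpty()) {
            throw new IllegalArgumentException(
                implementationClass + " does not implement any marker interfaces");
        }
        return found;
    }
}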
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java
deleted file mode 100644 (file)
index 4ab3867..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "rpc-service" element that gets an RPC service implementation from
- * the RpcProviderRegistry and provides it to the Blueprint container.
- *
- * @author Thomas Pantelis
- */
-final class RpcServiceMetadata extends AbstractInvokableServiceMetadata {
-    RpcServiceMetadata(final String id, final String interfaceName) {
-        super(id, interfaceName);
-    }
-
-    @Override
-    Predicate<RpcRoutingStrategy> rpcFilter() {
-        return s -> !s.isContextBasedRouted();
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java
deleted file mode 100644 (file)
index 331aac7..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utility methods for dealing with various aspects of RPCs and actions.
- *
- * @author Robert Varga
- */
-final class RpcUtil {
-    private static final Logger LOG = LoggerFactory.getLogger(RpcUtil.class);
-
-    private RpcUtil() {
-        throw new UnsupportedOperationException();
-    }
-
-    static Collection<QName> decomposeRpcService(final Class<RpcService> service,
-            final SchemaContext schemaContext, final Predicate<RpcRoutingStrategy> filter) {
-        final QNameModule moduleName = BindingReflections.getQNameModule(service);
-        final Module module = schemaContext.findModule(moduleName).orElseThrow(() -> new IllegalArgumentException(
-                "Module not found in SchemaContext: " + moduleName + "; service: " + service));
-        LOG.debug("Resolved service {} to module {}", service, module);
-
-        final Collection<? extends RpcDefinition> rpcs = module.getRpcs();
-        final Collection<QName> ret = new ArrayList<>(rpcs.size());
-        for (RpcDefinition rpc : rpcs) {
-            final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpc);
-            if (filter.test(strategy)) {
-                ret.add(rpc.getQName());
-            }
-        }
-
-        return ret;
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java
deleted file mode 100644 (file)
index 0412f00..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.io.Resources;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.IOException;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleEvent;
-import org.osgi.framework.ServiceReference;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.osgi.util.tracker.BundleTracker;
-import org.osgi.util.tracker.BundleTrackerCustomizer;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "specific-reference-list" element that obtains a specific list
- * of service instances from the OSGi registry for a given interface. The specific list is learned by first
- * extracting the list of expected service types by inspecting RESOLVED bundles for a resource file under
- * META-INF/services with the same name as the given interface. The type(s) listed in the resource file
- * must match the "type" property of the advertised service(s). In this manner, an app bundle announces the
- * service type(s) that it will advertise so that this class knows which services to expect up front. Once
- * all the expected services are obtained, the container is notified that all dependencies of this component
- * factory are satisfied.
- *
- * @author Thomas Pantelis
- */
-class SpecificReferenceListMetadata extends AbstractDependentComponentFactoryMetadata {
-    private static final Logger LOG = LoggerFactory.getLogger(SpecificReferenceListMetadata.class);
-
-    private final String interfaceName;
-    private final String serviceResourcePath;
-    private final Collection<String> expectedServiceTypes = new ConcurrentSkipListSet<>();
-    private final Collection<String> retrievedServiceTypes = new ConcurrentSkipListSet<>();
-    private final Collection<Object> retrievedServices = Collections.synchronizedList(new ArrayList<>());
-    private volatile BundleTracker<Bundle> bundleTracker;
-    private volatile ServiceTracker<Object, Object> serviceTracker;
-
-    SpecificReferenceListMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = interfaceName;
-        serviceResourcePath = "META-INF/services/" + interfaceName;
-    }
-
-    @Override
-    protected void startTracking() {
-        BundleTrackerCustomizer<Bundle> bundleListener = new BundleTrackerCustomizer<>() {
-            @Override
-            public Bundle addingBundle(final Bundle bundle, final BundleEvent event) {
-                bundleAdded(bundle);
-                return bundle;
-            }
-
-            @Override
-            public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
-            }
-
-            @Override
-            public void removedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
-            }
-        };
-
-        bundleTracker = new BundleTracker<>(container().getBundleContext(), Bundle.RESOLVED | Bundle.STARTING
-                | Bundle.STOPPING | Bundle.ACTIVE, bundleListener);
-
-        // This will get the list of all current RESOLVED+ bundles.
-        bundleTracker.open();
-
-        if (expectedServiceTypes.isEmpty()) {
-            setSatisfied();
-            return;
-        }
-
-        ServiceTrackerCustomizer<Object, Object> serviceListener = new ServiceTrackerCustomizer<>() {
-            @Override
-            public Object addingService(final ServiceReference<Object> reference) {
-                return serviceAdded(reference);
-            }
-
-            @Override
-            public void modifiedService(final ServiceReference<Object> reference, final Object service) {
-            }
-
-            @Override
-            public void removedService(final ServiceReference<Object> reference, final Object service) {
-                container().getBundleContext().ungetService(reference);
-            }
-        };
-
-        setDependencyDesc(interfaceName + " services with types " + expectedServiceTypes);
-
-        serviceTracker = new ServiceTracker<>(container().getBundleContext(), interfaceName, serviceListener);
-        serviceTracker.open();
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private void bundleAdded(final Bundle bundle) {
-        URL resource = bundle.getEntry(serviceResourcePath);
-        if (resource == null) {
-            return;
-        }
-
-        LOG.debug("{}: Found {} resource in bundle {}", logName(), resource, bundle.getSymbolicName());
-
-        try {
-            for (String line : Resources.readLines(resource, StandardCharsets.UTF_8)) {
-                int ci = line.indexOf('#');
-                if (ci >= 0) {
-                    line = line.substring(0, ci);
-                }
-
-                line = line.trim();
-                if (line.isEmpty()) {
-                    continue;
-                }
-
-                String serviceType = line;
-                LOG.debug("{}: Retrieved service type {}", logName(), serviceType);
-                expectedServiceTypes.add(serviceType);
-            }
-        } catch (final IOException e) {
-            setFailure(String.format("%s: Error reading resource %s from bundle %s", logName(), resource,
-                    bundle.getSymbolicName()), e);
-        }
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private Object serviceAdded(final ServiceReference<Object> reference) {
-        Object service = container().getBundleContext().getService(reference);
-        String serviceType = (String) reference.getProperty(OpendaylightNamespaceHandler.TYPE_ATTR);
-
-        LOG.debug("{}: Service type {} added from bundle {}", logName(), serviceType,
-                reference.getBundle().getSymbolicName());
-
-        if (serviceType == null) {
-            LOG.error("{}: Missing OSGi service property '{}' for service interface {} in bundle {}", logName(),
-                    OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName,  reference.getBundle().getSymbolicName());
-            return service;
-        }
-
-        if (!expectedServiceTypes.contains(serviceType)) {
-            LOG.error("{}: OSGi service property '{}' for service interface {} in bundle {} was not found in the "
-                    + "expected service types {} obtained via {} bundle resources. Is the bundle resource missing or "
-                    + "the service type misspelled?", logName(), OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName,
-                    reference.getBundle().getSymbolicName(), expectedServiceTypes, serviceResourcePath);
-            return service;
-        }
-
-        // If already satisfied, meaning we got all initial services, then a new bundle must've been
-        // dynamically installed or a prior service's blueprint container was restarted, in which case we
-        // restart our container.
-        if (isSatisfied()) {
-            restartContainer();
-        } else {
-            retrievedServiceTypes.add(serviceType);
-            retrievedServices.add(service);
-
-            if (retrievedServiceTypes.equals(expectedServiceTypes)) {
-                LOG.debug("{}: Got all expected service types", logName());
-                setSatisfied();
-            } else {
-                Set<String> remaining = new HashSet<>(expectedServiceTypes);
-                remaining.removeAll(retrievedServiceTypes);
-                setDependencyDesc(interfaceName + " services with types " + remaining);
-            }
-        }
-
-        return service;
-    }
-
-    @Override
-    public Object create() throws ComponentDefinitionException {
-        LOG.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
-        super.onCreate();
-
-        LOG.debug("{}: create returning service list {}", logName(), retrievedServices);
-
-        synchronized (retrievedServices) {
-            return ImmutableList.copyOf(retrievedServices);
-        }
-    }
-
-    @Override
-    public void destroy(final Object instance) {
-        super.destroy(instance);
-
-        if (bundleTracker != null) {
-            bundleTracker.close();
-            bundleTracker = null;
-        }
-
-        if (serviceTracker != null) {
-            serviceTracker.close();
-            serviceTracker = null;
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder builder = new StringBuilder();
-        builder.append("SpecificReferenceListMetadata [interfaceName=").append(interfaceName)
-                .append(", serviceResourcePath=").append(serviceResourcePath).append("]");
-        return builder.toString();
-    }
-}
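
The SpecificReferenceListMetadata deleted above discovered the expected service types by reading a META-INF/services-style resource from each RESOLVED bundle, ignoring '#' comments and blank lines. A minimal sketch of just that parsing step, reading from a java.io.Reader rather than an OSGi bundle URL:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;

final class ServiceTypeResource {
    /** Parses service type names, one per line, stripping '#' comments and surrounding whitespace. */
    static List<String> parse(final Reader reader) throws IOException {
        final List<String> types = new ArrayList<>();
        try (BufferedReader lines = new BufferedReader(reader)) {
            String line;
            while ((line = lines.readLine()) != null) {
                final int comment = line.indexOf('#');
                if (comment >= 0) {
                    line = line.substring(0, comment);
                }
                line = line.trim();
                if (!line.isEmpty()) {
                    types.add(line);
                }
            }
        }
        return types;
    }
}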
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java
deleted file mode 100644 (file)
index 97c04af..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "static-reference" element that obtains an OSGi service and
- * returns the actual instance. This differs from the standard "reference" element that returns a dynamic
- * proxy whose underlying service instance can come and go.
- *
- * @author Thomas Pantelis
- */
-class StaticReferenceMetadata extends AbstractDependentComponentFactoryMetadata {
-    private static final Logger LOG = LoggerFactory.getLogger(StaticReferenceMetadata.class);
-
-    private final String interfaceName;
-    private volatile Object retrievedService;
-
-    StaticReferenceMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = interfaceName;
-    }
-
-    @Override
-    protected void startTracking() {
-        retrieveService(interfaceName, interfaceName, service -> {
-            retrievedService = service;
-            setSatisfied();
-        });
-    }
-
-    @Override
-    public Object create() throws ComponentDefinitionException {
-        super.onCreate();
-
-        LOG.debug("{}: create returning service {}", logName(), retrievedService);
-
-        return retrievedService;
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder builder = new StringBuilder();
-        builder.append("StaticReferenceMetadata [interfaceName=").append(interfaceName).append("]");
-        return builder.toString();
-    }
-}
index 525fc82c197762d6a735f81fa20b668dc3048188..26246f9d08876a1785a675a1bb108f230f398be3 100644 (file)
@@ -29,10 +29,6 @@ import org.slf4j.LoggerFactory;
 class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
     private static final Logger LOG = LoggerFactory.getLogger(StaticServiceReferenceRecipe.class);
 
-    private static final SatisfactionListener NOOP_LISTENER = satisfiable -> {
-        // Intentional NOOP
-    };
-
     private volatile ServiceReference<?> trackedServiceReference;
     private volatile Object trackedService;
     private Consumer<Object> serviceSatisfiedCallback;
@@ -44,8 +40,10 @@ class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
     }
 
     void startTracking(final Consumer<Object> newServiceSatisfiedCallback) {
-        this.serviceSatisfiedCallback = newServiceSatisfiedCallback;
-        super.start(NOOP_LISTENER);
+        serviceSatisfiedCallback = newServiceSatisfiedCallback;
+        super.start(satisfiable -> {
+            // Intentional NOOP
+        });
     }
 
     @SuppressWarnings("rawtypes")
index 6407efa949c038bf29a740a1fe9b1a54b354e74b..dedf8e1c59b2a3aaadd1eb7daa0971d461a7c396 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.blueprint.tests;
 
-import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 
 import org.junit.Test;
 import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader;
@@ -21,26 +22,26 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
  * @author Michael Vorburger.ch
  */
 public class DataStoreAppConfigDefaultXMLReaderTest extends AbstractConcurrentDataBrokerTest {
-
     @Test
     public void testConfigXML() throws Exception {
-        Lists lists = new DataStoreAppConfigDefaultXMLReader<>(
-                getClass(), "/opendaylight-sal-test-store-config.xml",
-                getDataBrokerTestCustomizer().getSchemaService(),
-                getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(),
-                Lists.class).createDefaultInstance();
+        Lists lists = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/opendaylight-sal-test-store-config.xml",
+            getDataBrokerTestCustomizer().getSchemaService(),
+            getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class)
+            .createDefaultInstance();
 
-        UnorderedList element = lists.getUnorderedContainer().getUnorderedList().values().iterator().next();
-        assertThat(element.getName()).isEqualTo("someName");
-        assertThat(element.getValue()).isEqualTo("someValue");
+        UnorderedList element = lists.nonnullUnorderedContainer().nonnullUnorderedList().values().iterator().next();
+        assertEquals("someName", element.getName());
+        assertEquals("someValue", element.getValue());
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test
     public void testBadXMLName() throws Exception {
-        new DataStoreAppConfigDefaultXMLReader<>(
-                getClass(), "/badname.xml",
-                getDataBrokerTestCustomizer().getSchemaService(),
-                getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(),
-                Lists.class).createDefaultInstance();
+        final var reader = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/badname.xml",
+            getDataBrokerTestCustomizer().getSchemaService(),
+            getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class);
+
+        final String message = assertThrows(IllegalArgumentException.class, reader::createDefaultInstance).getMessage();
+        assertEquals("resource /badname.xml relative to " + DataStoreAppConfigDefaultXMLReaderTest.class.getName()
+            + " not found.", message);
     }
 }
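
The test change above follows the usual JUnit 4.13 migration from @Test(expected = ...) to assertThrows, which additionally allows asserting on the exception message. A generic sketch of the pattern; the requirePositive method is purely illustrative:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class ExceptionMessageTest {
    @Test
    public void rejectsNonPositiveInput() {
        // assertThrows returns the thrown exception, so its message can be verified as well.
        final IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
            () -> requirePositive(-1));
        assertEquals("value must be positive: -1", ex.getMessage());
    }

    private static int requirePositive(final int value) {
        if (value <= 0) {
            throw new IllegalArgumentException("value must be positive: " + value);
        }
        return value;
    }
}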
diff --git a/opendaylight/config/netty-event-executor-config/pom.xml b/opendaylight/config/netty-event-executor-config/pom.xml
deleted file mode 100644 (file)
index 39d382f..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>netty-event-executor-config</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's event executor</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java b/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java
deleted file mode 100644 (file)
index b12fc67..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.eventexecutor;
-
-import com.google.common.reflect.AbstractInvocationHandler;
-import com.google.common.reflect.Reflection;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.GlobalEventExecutor;
-import io.netty.util.concurrent.ImmediateEventExecutor;
-import java.lang.reflect.Method;
-import java.util.concurrent.TimeUnit;
-
-public interface AutoCloseableEventExecutor extends EventExecutor, AutoCloseable {
-
-    static AutoCloseableEventExecutor globalEventExecutor() {
-        return CloseableEventExecutorMixin.createCloseableProxy(GlobalEventExecutor.INSTANCE);
-    }
-
-    static AutoCloseableEventExecutor immediateEventExecutor() {
-        return CloseableEventExecutorMixin.createCloseableProxy(ImmediateEventExecutor.INSTANCE);
-    }
-
-    class CloseableEventExecutorMixin implements AutoCloseable {
-        public static final int DEFAULT_SHUTDOWN_SECONDS = 1;
-        private final EventExecutor eventExecutor;
-
-        public CloseableEventExecutorMixin(final EventExecutor eventExecutor) {
-            this.eventExecutor = eventExecutor;
-        }
-
-        @Override
-        @SuppressFBWarnings(value = "UC_USELESS_VOID_METHOD", justification = "False positive")
-        public void close() {
-            eventExecutor.shutdownGracefully(0, DEFAULT_SHUTDOWN_SECONDS, TimeUnit.SECONDS);
-        }
-
-        static AutoCloseableEventExecutor createCloseableProxy(final EventExecutor eventExecutor) {
-            final CloseableEventExecutorMixin closeableEventExecutor = new CloseableEventExecutorMixin(eventExecutor);
-            return Reflection.newProxy(AutoCloseableEventExecutor.class, new AbstractInvocationHandler() {
-                @Override
-                protected Object handleInvocation(final Object proxy, final Method method, final Object[] args)
-                        throws Throwable {
-                    if (method.getName().equals("close")) {
-                        closeableEventExecutor.close();
-                        return null;
-                    } else {
-                        return method.invoke(closeableEventExecutor.eventExecutor, args);
-                    }
-                }
-            });
-        }
-    }
-}
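
The AutoCloseableEventExecutor deleted above wrapped an EventExecutor in a dynamic proxy that rerouted close() to a graceful shutdown and forwarded every other call to the delegate. A comparable sketch using the plain JDK java.lang.reflect.Proxy instead of Guava's Reflection helper; the Resource interface is a hypothetical stand-in for the proxied type:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

final class ClosingProxy {
    /** Hypothetical interface combining a delegate's API with AutoCloseable. */
    interface Resource extends AutoCloseable {
        String read();
    }

    /** Wraps a delegate so close() runs the supplied shutdown hook and all other calls pass through. */
    static Resource closeableProxy(final Resource delegate, final Runnable onClose) {
        final InvocationHandler handler = (proxy, method, args) -> {
            if ("close".equals(method.getName()) && method.getParameterCount() == 0) {
                onClose.run();
                return null;
            }
            return method.invoke(delegate, args);
        };
        return (Resource) Proxy.newProxyInstance(Resource.class.getClassLoader(),
            new Class<?>[] { Resource.class }, handler);
    }
}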
diff --git a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java b/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/OSGiGlobalEventExecutor.java
deleted file mode 100644 (file)
index f5081ee..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.eventexecutor;
-
-import static io.netty.util.concurrent.GlobalEventExecutor.INSTANCE;
-
-import com.google.common.annotations.Beta;
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.EventExecutorGroup;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.ProgressivePromise;
-import io.netty.util.concurrent.Promise;
-import io.netty.util.concurrent.ScheduledFuture;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Beta
-@Component(immediate = true, property = "type=global-event-executor")
-public final class OSGiGlobalEventExecutor implements EventExecutor {
-    private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalEventExecutor.class);
-
-    @Override
-    public boolean isShuttingDown() {
-        return INSTANCE.isShuttingDown();
-    }
-
-    @Override
-    public Future<?> shutdownGracefully() {
-        return INSTANCE.shutdownGracefully();
-    }
-
-    @Override
-    public Future<?> shutdownGracefully(final long quietPeriod, final long timeout, final TimeUnit unit) {
-        return INSTANCE.shutdownGracefully(quietPeriod, timeout, unit);
-    }
-
-    @Override
-    public Future<?> terminationFuture() {
-        return INSTANCE.terminationFuture();
-    }
-
-    @Override
-    @Deprecated
-    public void shutdown() {
-        INSTANCE.shutdown();
-    }
-
-    @Override
-    public List<Runnable> shutdownNow() {
-        return INSTANCE.shutdownNow();
-    }
-
-    @Override
-    public Iterator<EventExecutor> iterator() {
-        return INSTANCE.iterator();
-    }
-
-    @Override
-    public Future<?> submit(final Runnable task) {
-        return INSTANCE.submit(task);
-    }
-
-    @Override
-    public <T> Future<T> submit(final Runnable task, final T result) {
-        return INSTANCE.submit(task, result);
-    }
-
-    @Override
-    public <T> Future<T> submit(final Callable<T> task) {
-        return INSTANCE.submit(task);
-    }
-
-    @Override
-    public ScheduledFuture<?> schedule(final Runnable command, final long delay, final TimeUnit unit) {
-        return INSTANCE.schedule(command, delay, unit);
-    }
-
-    @Override
-    public <V> ScheduledFuture<V> schedule(final Callable<V> callable, final long delay, final TimeUnit unit) {
-        return INSTANCE.schedule(callable, delay, unit);
-    }
-
-    @Override
-    public ScheduledFuture<?> scheduleAtFixedRate(final Runnable command, final long initialDelay, final long period,
-            final TimeUnit unit) {
-        return INSTANCE.scheduleAtFixedRate(command, initialDelay, period, unit);
-    }
-
-    @Override
-    public ScheduledFuture<?> scheduleWithFixedDelay(final Runnable command, final long initialDelay, final long delay,
-            final TimeUnit unit) {
-        return INSTANCE.scheduleWithFixedDelay(command, initialDelay, delay, unit);
-    }
-
-    @Override
-    public boolean isShutdown() {
-        return INSTANCE.isShutdown();
-    }
-
-    @Override
-    public boolean isTerminated() {
-        return INSTANCE.isTerminated();
-    }
-
-    @Override
-    public boolean awaitTermination(final long timeout, final TimeUnit unit) throws InterruptedException {
-        return INSTANCE.awaitTermination(timeout, unit);
-    }
-
-    @Override
-    public <T> List<java.util.concurrent.Future<T>> invokeAll(final Collection<? extends Callable<T>> tasks)
-            throws InterruptedException {
-        return INSTANCE.invokeAll(tasks);
-    }
-
-    @Override
-    public <T> List<java.util.concurrent.Future<T>> invokeAll(final Collection<? extends Callable<T>> tasks,
-            final long timeout, final TimeUnit unit) throws InterruptedException {
-        return INSTANCE.invokeAll(tasks, timeout, unit);
-    }
-
-    @Override
-    public <T> T invokeAny(final Collection<? extends Callable<T>> tasks)
-            throws InterruptedException, ExecutionException {
-        return INSTANCE.invokeAny(tasks);
-    }
-
-    @Override
-    public <T> T invokeAny(final Collection<? extends Callable<T>> tasks, final long timeout, final TimeUnit unit)
-            throws InterruptedException, ExecutionException, TimeoutException {
-        return INSTANCE.invokeAny(tasks, timeout, unit);
-    }
-
-    @Override
-    public void execute(final Runnable command) {
-        INSTANCE.execute(command);
-    }
-
-    @Override
-    public EventExecutor next() {
-        return INSTANCE.next();
-    }
-
-    @Override
-    public EventExecutorGroup parent() {
-        return INSTANCE.parent();
-    }
-
-    @Override
-    public boolean inEventLoop() {
-        return INSTANCE.inEventLoop();
-    }
-
-    @Override
-    public boolean inEventLoop(final Thread thread) {
-        return INSTANCE.inEventLoop(thread);
-    }
-
-    @Override
-    public <V> Promise<V> newPromise() {
-        return INSTANCE.newPromise();
-    }
-
-    @Override
-    public <V> ProgressivePromise<V> newProgressivePromise() {
-        return INSTANCE.newProgressivePromise();
-    }
-
-    @Override
-    public <V> Future<V> newSucceededFuture(final V result) {
-        return INSTANCE.newSucceededFuture(result);
-    }
-
-    @Override
-    public <V> Future<V> newFailedFuture(final Throwable cause) {
-        return INSTANCE.newFailedFuture(cause);
-    }
-
-    @Activate
-    void activate() {
-        LOG.info("Global Event executor enabled");
-    }
-
-    @Deactivate
-    void deactivate() {
-        LOG.info("Global Event executor disabled");
-    }
-
-}
diff --git a/opendaylight/config/netty-threadgroup-config/pom.xml b/opendaylight/config/netty-threadgroup-config/pom.xml
deleted file mode 100644 (file)
index 14a5c98..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>netty-threadgroup-config</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's event group</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-transport</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/AbstractGlobalGroup.java
deleted file mode 100644 (file)
index ca4bc29..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import io.netty.channel.nio.NioEventLoopGroup;
-import java.util.concurrent.TimeUnit;
-
-abstract class AbstractGlobalGroup extends NioEventLoopGroup implements AutoCloseable {
-    AbstractGlobalGroup(final int threadCount) {
-        super(threadCount < 0 ? 0 : threadCount);
-    }
-
-    @Override
-    public final void close() {
-        shutdownGracefully(0, 1, TimeUnit.SECONDS);
-    }
-}
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/Configuration.java
deleted file mode 100644 (file)
index 174b44f..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import org.osgi.service.metatype.annotations.AttributeDefinition;
-import org.osgi.service.metatype.annotations.ObjectClassDefinition;
-
-@ObjectClassDefinition(pid = "org.opendaylight.netty.threadgroup")
-public @interface Configuration {
-    @AttributeDefinition(name = "global-boss-group-thread-count")
-    int bossThreadCount() default 0;
-
-    @AttributeDefinition(name = "global-worker-group-thread-count")
-    int workerThreadCount() default 0;
-}
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalBossGroup.java
deleted file mode 100644 (file)
index 5b46184..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import io.netty.channel.EventLoopGroup;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.metatype.annotations.Designate;
-
-@Component(immediate = true, service = EventLoopGroup.class, property = "type=global-boss-group")
-@Designate(ocd = Configuration.class)
-public final class GlobalBossGroup extends AbstractGlobalGroup {
-    @Activate
-    public GlobalBossGroup(final Configuration configuration) {
-        super(configuration.bossThreadCount());
-    }
-
-    @Deactivate
-    void deactivate() {
-        close();
-    }
-}
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/GlobalWorkerGroup.java
deleted file mode 100644 (file)
index 4a9f46e..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import io.netty.channel.EventLoopGroup;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.metatype.annotations.Designate;
-
-@Component(immediate = true, service = EventLoopGroup.class, property = "type=global-worker-group")
-@Designate(ocd = Configuration.class)
-public final class GlobalWorkerGroup extends AbstractGlobalGroup {
-    @Activate
-    public GlobalWorkerGroup(final Configuration configuration) {
-        super(configuration.workerThreadCount());
-    }
-
-    @Deactivate
-    void deactivate() {
-        close();
-    }
-}
diff --git a/opendaylight/config/netty-timer-config/pom.xml b/opendaylight/config/netty-timer-config/pom.xml
deleted file mode 100644 (file)
index a740ba2..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>netty-timer-config</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's timer</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java b/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java
deleted file mode 100644 (file)
index 7bc1352..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.timer;
-
-import io.netty.util.HashedWheelTimer;
-import io.netty.util.Timeout;
-import io.netty.util.Timer;
-import io.netty.util.TimerTask;
-import java.util.Set;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import org.eclipse.jdt.annotation.Nullable;
-
-public final class HashedWheelTimerCloseable implements AutoCloseable, Timer {
-
-    private final Timer timer;
-
-    private HashedWheelTimerCloseable(final Timer timer) {
-        this.timer = timer;
-    }
-
-    @Override
-    public void close() {
-        stop();
-    }
-
-    @Override
-    public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) {
-        return this.timer.newTimeout(task, delay, unit);
-    }
-
-    @Override
-    public Set<Timeout> stop() {
-        return this.timer.stop();
-    }
-
-    public static HashedWheelTimerCloseable newInstance(final @Nullable Long duration,
-            final @Nullable Integer ticksPerWheel) {
-        return newInstance(null, duration, ticksPerWheel);
-    }
-
-    public static HashedWheelTimerCloseable newInstance(final @Nullable ThreadFactory threadFactory,
-            final @Nullable Long duration, final @Nullable Integer ticksPerWheel) {
-        TimeUnit unit = TimeUnit.MILLISECONDS;
-        if (!nullOrNonPositive(duration) && threadFactory == null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit));
-        }
-
-        if (!nullOrNonPositive(duration) && threadFactory == null && !nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit, ticksPerWheel));
-        }
-
-        if (nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(threadFactory));
-        }
-
-        if (!nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(
-                    new HashedWheelTimer(threadFactory, duration, unit));
-        }
-
-        if (!nullOrNonPositive(duration) && threadFactory != null && !nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(
-                    new HashedWheelTimer(threadFactory, duration, unit, ticksPerWheel));
-        }
-
-        return new HashedWheelTimerCloseable(new HashedWheelTimer());
-    }
-
-    private static boolean nullOrNonPositive(final Number num) {
-        return num == null || num.longValue() <= 0;
-    }
-}
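
The HashedWheelTimerCloseable deleted above mainly selected among Netty HashedWheelTimer constructors based on which optional settings were supplied, and mapped close() to stop(). A brief usage sketch of the underlying Netty timer itself; the tick duration, delay and printed messages are illustrative values only:

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import java.util.concurrent.TimeUnit;

final class TimerUsage {
    public static void main(final String[] args) throws InterruptedException {
        // 100 ms ticks with the default wheel size.
        final HashedWheelTimer timer = new HashedWheelTimer(100, TimeUnit.MILLISECONDS);

        final Timeout timeout = timer.newTimeout(
            t -> System.out.println("fired after roughly one second"), 1, TimeUnit.SECONDS);

        Thread.sleep(1500);
        System.out.println("expired: " + timeout.isExpired());

        // stop() cancels pending timeouts and releases the worker thread.
        timer.stop();
    }
}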
diff --git a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java b/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/OSGiGlobalTimer.java
deleted file mode 100644 (file)
index 9613000..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.timer;
-
-import io.netty.util.Timeout;
-import io.netty.util.Timer;
-import io.netty.util.TimerTask;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.metatype.annotations.AttributeDefinition;
-import org.osgi.service.metatype.annotations.Designate;
-import org.osgi.service.metatype.annotations.ObjectClassDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Component(immediate = true, configurationPid = "org.opendaylight.netty.timer", property = "type=global-timer")
-@Designate(ocd = OSGiGlobalTimer.Config.class)
-public final class OSGiGlobalTimer implements Timer {
-    @ObjectClassDefinition
-    public @interface Config {
-        @AttributeDefinition(name = "tick-duration")
-        long tickDuration() default 0;
-        @AttributeDefinition(name = "ticks-per-wheel")
-        int ticksPerWheel() default 0;
-    }
-
-    private static final Logger LOG = LoggerFactory.getLogger(OSGiGlobalTimer.class);
-
-    private Timer delegate;
-
-    @Override
-    public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) {
-        return delegate.newTimeout(task, delay, unit);
-    }
-
-    @Override
-    public Set<Timeout> stop() {
-        return delegate.stop();
-    }
-
-    @Activate
-    void activate(final Config config) {
-        delegate = HashedWheelTimerCloseable.newInstance(config.tickDuration(), config.ticksPerWheel());
-        LOG.info("Global Netty timer started");
-    }
-
-    @Deactivate
-    void deactivate() {
-        delegate.stop();
-        LOG.info("Global Netty timer stopped");
-    }
-}
diff --git a/opendaylight/config/pom.xml b/opendaylight/config/pom.xml
deleted file mode 100644 (file)
index f61ce50..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>config-aggregator</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <properties>
-    <maven.deploy.skip>true</maven.deploy.skip>
-    <maven.install.skip>true</maven.install.skip>
-  </properties>
-
-  <modules>
-    <module>threadpool-config-api</module>
-    <module>threadpool-config-impl</module>
-    <module>netty-threadgroup-config</module>
-    <module>netty-event-executor-config</module>
-    <module>netty-timer-config</module>
-  </modules>
-</project>
diff --git a/opendaylight/config/threadpool-config-api/pom.xml b/opendaylight/config/threadpool-config-api/pom.xml
deleted file mode 100644 (file)
index 81c069f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>threadpool-config-api</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-</project>
diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java
deleted file mode 100644 (file)
index 79ed26b..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-/**
- * Interface representing scheduled {@link ThreadPool}.
- */
-public interface ScheduledThreadPool extends ThreadPool {
-
-    @Override
-    ScheduledExecutorService getExecutor();
-}
\ No newline at end of file
diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java
deleted file mode 100644 (file)
index 68ecb80..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ExecutorService;
-
-/**
- * Interface representing thread pool.
- */
-public interface ThreadPool {
-
-    ExecutorService getExecutor();
-
-    int getMaxThreadCount();
-}
\ No newline at end of file
diff --git a/opendaylight/config/threadpool-config-impl/pom.xml b/opendaylight/config/threadpool-config-impl/pom.xml
deleted file mode 100644 (file)
index b506f8f..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>threadpool-config-impl</artifactId>
-  <version>0.16.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>controller-artifacts</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>threadpool-config-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java
deleted file mode 100644 (file)
index 2dad264..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} using fixed number of threads wraps
- * {@link ExecutorService}.
- */
-public class FixedThreadPoolWrapper implements ThreadPool, Closeable {
-
-    private final ThreadPoolExecutor executor;
-
-    public FixedThreadPoolWrapper(int threadCount, ThreadFactory factory) {
-        this.executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, factory);
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public ExecutorService getExecutor() {
-        return Executors.unconfigurableExecutorService(executor);
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return executor.getMaximumPoolSize();
-    }
-
-    public void setMaxThreadCount(int maxThreadCount) {
-        executor.setCorePoolSize(maxThreadCount);
-        executor.setMaximumPoolSize(maxThreadCount);
-    }
-}
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java
deleted file mode 100644 (file)
index 9949e36..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.OptionalInt;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} using flexible number of threads wraps
- * {@link ExecutorService}.
- */
-public class FlexibleThreadPoolWrapper implements ThreadPool, Closeable {
-    private final ThreadPoolExecutor executor;
-
-    public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
-            final TimeUnit timeUnit, final ThreadFactory threadFactory) {
-        this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(OptionalInt.empty()));
-    }
-
-    public FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
-            final TimeUnit timeUnit, final ThreadFactory threadFactory, final OptionalInt queueCapacity) {
-        this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(queueCapacity));
-    }
-
-    private FlexibleThreadPoolWrapper(final int minThreadCount, final int maxThreadCount, final long keepAlive,
-            final TimeUnit timeUnit, final ThreadFactory threadFactory, final BlockingQueue<Runnable> queue) {
-
-        executor = new ThreadPoolExecutor(minThreadCount, maxThreadCount, keepAlive, timeUnit,
-                queue, threadFactory, new FlexibleRejectionHandler());
-        executor.prestartAllCoreThreads();
-    }
-
-    /**
-     * Overriding the queue:
-     * ThreadPoolExecutor would not create new threads if the queue is not full, thus adding
-     * occurs in RejectedExecutionHandler.
-     * This impl saturates threadpool first, then queue. When both are full caller will get blocked.
-     */
-    private static ForwardingBlockingQueue getQueue(final OptionalInt capacity) {
-        final BlockingQueue<Runnable> delegate = capacity.isPresent() ? new LinkedBlockingQueue<>(capacity.getAsInt())
-                : new LinkedBlockingQueue<>();
-        return new ForwardingBlockingQueue(delegate);
-    }
-
-    @Override
-    public ExecutorService getExecutor() {
-        return Executors.unconfigurableExecutorService(executor);
-    }
-
-    public int getMinThreadCount() {
-        return executor.getCorePoolSize();
-    }
-
-    public void setMinThreadCount(final int minThreadCount) {
-        executor.setCorePoolSize(minThreadCount);
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return executor.getMaximumPoolSize();
-    }
-
-    public void setMaxThreadCount(final int maxThreadCount) {
-        executor.setMaximumPoolSize(maxThreadCount);
-    }
-
-    public long getKeepAliveMillis() {
-        return executor.getKeepAliveTime(TimeUnit.MILLISECONDS);
-    }
-
-    public void setKeepAliveMillis(final long keepAliveMillis) {
-        executor.setKeepAliveTime(keepAliveMillis, TimeUnit.MILLISECONDS);
-    }
-
-    public void setThreadFactory(final ThreadFactory threadFactory) {
-        executor.setThreadFactory(threadFactory);
-    }
-
-    public void prestartAllCoreThreads() {
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    /**
-     * if the max threads are met, then it will raise a rejectedExecution. We then push to the queue.
-     */
-    private static class FlexibleRejectionHandler implements RejectedExecutionHandler {
-        @Override
-        @SuppressWarnings("checkstyle:parameterName")
-        public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
-            try {
-                executor.getQueue().put(r);
-            } catch (InterruptedException e) {
-                throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
-            }
-        }
-    }
-
-    private static class ForwardingBlockingQueue
-            extends com.google.common.util.concurrent.ForwardingBlockingQueue<Runnable> {
-        private final BlockingQueue<Runnable> delegate;
-
-        ForwardingBlockingQueue(final BlockingQueue<Runnable> delegate) {
-            this.delegate = delegate;
-        }
-
-        @Override
-        protected BlockingQueue<Runnable> delegate() {
-            return delegate;
-        }
-
-        @Override
-        @SuppressWarnings("checkstyle:parameterName")
-        public boolean offer(final Runnable o) {
-            // ThreadPoolExecutor will spawn a new thread after core size is reached only
-            // if the queue.offer returns false.
-            return false;
-        }
-    }
-}
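
The queue-override note in the removed FlexibleThreadPoolWrapper captures a general ThreadPoolExecutor quirk: the pool only grows past its core size once the work queue rejects an offer, so the wrapper always rejects offers and lets the rejection handler fall back to a blocking put(). A minimal standalone sketch of that saturate-threads-first pattern, using illustrative names that are not part of the controller API:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    final class SaturateThreadsFirstSketch {
        static ThreadPoolExecutor newPool(final int coreThreads, final int maxThreads) {
            // Always reject direct offers: ThreadPoolExecutor then spawns threads up to
            // maxThreads before anything ever sits in the queue.
            final LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>() {
                @Override
                public boolean offer(final Runnable task) {
                    return false;
                }
            };

            // Once all maxThreads workers are busy, rejected tasks are pushed back into the
            // queue with a blocking put(), so the submitter waits instead of losing the task.
            return new ThreadPoolExecutor(coreThreads, maxThreads, 60, TimeUnit.SECONDS, queue,
                (task, executor) -> {
                    try {
                        executor.getQueue().put(task);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
                    }
                });
        }
    }

The removed wrapper additionally called prestartAllCoreThreads() so the core workers exist before the first task is submitted.
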
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java
deleted file mode 100644 (file)
index 0efa482..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.threadpool.util;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.Closeable;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * Implementation of {@link ThreadFactory}. This class is thread-safe.
- */
-public class NamingThreadPoolFactory implements ThreadFactory, Closeable {
-
-    private final ThreadGroup group;
-    private final String namePrefix;
-    private final AtomicLong threadName = new AtomicLong();
-
-    public NamingThreadPoolFactory(final String namePrefix) {
-        this.namePrefix = requireNonNull(namePrefix);
-        this.group = new ThreadGroup(namePrefix);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:parameterName")
-    public Thread newThread(final Runnable r) {
-        return new Thread(group, r, String.format("%s-%d", group.getName(), threadName.incrementAndGet()));
-    }
-
-    @Override
-    public void close() {
-    }
-
-    public String getNamePrefix() {
-        return namePrefix;
-    }
-}
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java
deleted file mode 100644 (file)
index 648bd82..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.ThreadFactory;
-import org.opendaylight.controller.config.threadpool.ScheduledThreadPool;
-
-/**
- * Implementation of {@link ScheduledThreadPool} wraps
- * {@link ScheduledExecutorService}.
- */
-public class ScheduledThreadPoolWrapper implements ScheduledThreadPool, Closeable {
-
-    private final ScheduledThreadPoolExecutor executor;
-    private final int threadCount;
-
-    public ScheduledThreadPoolWrapper(int threadCount, ThreadFactory factory) {
-        this.threadCount = threadCount;
-        this.executor = new ScheduledThreadPoolExecutor(threadCount, factory);
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public ScheduledExecutorService getExecutor() {
-        return Executors.unconfigurableScheduledExecutorService(executor);
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return threadCount;
-    }
-
-}
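
Taken together, the deleted classes formed a small toolkit: NamingThreadPoolFactory produced named, numbered threads and the fixed/scheduled wrappers exposed them through the ThreadPool interfaces. A hedged sketch of how a consumer typically wired them up before this removal (the wiring itself is illustrative; only the constructors and accessors come from the deleted sources):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import org.opendaylight.controller.config.threadpool.util.FixedThreadPoolWrapper;
    import org.opendaylight.controller.config.threadpool.util.NamingThreadPoolFactory;
    import org.opendaylight.controller.config.threadpool.util.ScheduledThreadPoolWrapper;

    final class ThreadPoolWiringSketch {
        static void run() throws InterruptedException {
            final NamingThreadPoolFactory factory = new NamingThreadPoolFactory("example-pool");

            // Fixed-size pool: threads are prestarted and named example-pool-1, example-pool-2, ...
            final FixedThreadPoolWrapper fixed = new FixedThreadPoolWrapper(4, factory);
            final ExecutorService executor = fixed.getExecutor();
            executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));

            // Scheduled pool built from the same thread factory.
            final ScheduledThreadPoolWrapper scheduled = new ScheduledThreadPoolWrapper(2, factory);
            final ScheduledExecutorService scheduler = scheduled.getExecutor();
            scheduler.schedule(() -> System.out.println("delayed task"), 1, TimeUnit.SECONDS);

            Thread.sleep(1500);
            scheduled.close();
            fixed.close();
        }
    }
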
index 3d53e0f019e3ac3b588bf42b9f732da71d43ea07..fb0bc37268dda8a899fb8fcef769e65e6bd282e3 100644 (file)
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
 
     <dependencies>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>repackaged-akka</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
         </dependency>
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
         </dependency>
-
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>yang-data-impl</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-spi</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>yang-data-codec-binfmt</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-tree-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
 
         <!-- Testing dependencies -->
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>mockito-configuration</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.13</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-tree-ri</artifactId>
+            <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <scope>test</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
     </dependencies>
 
     <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <artifactId>maven-javadoc-plugin</artifactId>
+                    <version>3.1.1</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
         <plugins>
             <plugin>
                 <groupId>org.apache.felix</groupId>
index cfaf477f8fc8da8c4f3e06f79e3b25840112a003..4658f3b754f8008f0b3c905bfc9056fd55371734 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -22,10 +21,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * Enumeration of all ABI versions supported by this implementation of the client access API.
- *
- * @author Robert Varga
  */
-@Beta
 public enum ABIVersion implements WritableObject {
     // NOTE: enumeration values need to be sorted in ascending order of their version to keep Comparable working
 
@@ -40,44 +36,20 @@ public enum ABIVersion implements WritableObject {
         }
     },
 
+    // BORON was 5
+    // NEON_SR2 was 6
+    // SODIUM_SR1 was 7
+    // MAGNESIUM was 8
+    // CHLORINE_SR2 was 9
+
     /**
-     * Initial ABI version, as shipped with Boron Simultaneous release.
-     */
-    // We seed the initial version to be the same as DataStoreVersions.BORON-VERSION for compatibility reasons.
-    BORON(5) {
-        @Override
-        public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.LITHIUM;
-        }
-    },
-    /**
-     * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
-     * are using {@link NormalizedNodeStreamVersion#NEON_SR2}, which improves encoding.
-     */
-    NEON_SR2(6) {
-        @Override
-        public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.NEON_SR2;
-        }
-    },
-    /**
-     * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
-     * are using {@link NormalizedNodeStreamVersion#SODIUM_SR1}, which improves encoding.
-     */
-    SODIUM_SR1(7) {
-        @Override
-        public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.SODIUM_SR1;
-        }
-    },
-    /**
-     * Revised ABI version. The messages remain the same as {@link #BORON}, but messages bearing QNames in any shape
-     * are using {@link NormalizedNodeStreamVersion#MAGNESIUM}, which improves encoding.
+     * Oldest ABI version we support. The messages remain the same as {@code CHLORINE_SR2}; the serialization proxies in
+     * use are flat objects without any superclasses. Data encoding does not include augmentations as separate objects.
      */
-    MAGNESIUM(8) {
+    POTASSIUM(10) {
         @Override
         public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.MAGNESIUM;
+            return NormalizedNodeStreamVersion.POTASSIUM;
         }
     },
 
@@ -117,7 +89,7 @@ public enum ABIVersion implements WritableObject {
      * @return Current {@link ABIVersion}
      */
     public static @NonNull ABIVersion current() {
-        return SODIUM_SR1;
+        return POTASSIUM;
     }
 
     /**
@@ -130,24 +102,22 @@ public enum ABIVersion implements WritableObject {
      * @throws PastVersionException if the specified integer identifies a past version which is no longer supported
      */
     public static @NonNull ABIVersion valueOf(final short value) throws FutureVersionException, PastVersionException {
-        switch (Short.toUnsignedInt(value)) {
-            case 0:
-            case 1:
-            case 2:
-            case 3:
-            case 4:
-                throw new PastVersionException(value, BORON);
-            case 5:
-                return BORON;
-            case 6:
-                return NEON_SR2;
-            case 7:
-                return SODIUM_SR1;
-            case 8:
-                return MAGNESIUM;
-            default:
-                throw new FutureVersionException(value, MAGNESIUM);
-        }
+        return switch (Short.toUnsignedInt(value)) {
+            case 0, 1, 2, 3, 4, 6, 7, 8, 9 -> throw new PastVersionException(value, POTASSIUM);
+            case 10 -> POTASSIUM;
+            default -> throw new FutureVersionException(value, POTASSIUM);
+        };
+    }
+
+    /**
+     * Return {@code true} if this version is earlier than some {@code other} version.
+     *
+     * @param other Other {@link ABIVersion}
+     * @return {@code true} if {@code other} is later
+     * @throws NullPointerException if {@code other} is null
+     */
+    public boolean lt(final @NonNull ABIVersion other) {
+        return compareTo(other) < 0;
     }
 
     @Override
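
With the pre-Potassium constants reduced to comments, negotiation collapses onto POTASSIUM plus the closest-version hint carried by the exceptions, and the new lt() helper replaces ad-hoc compareTo() checks. A hedged sketch of what a caller-side negotiation might look like (the negotiate() wrapper is illustrative; only the ABIVersion and exception API is taken from the diff above):

    import org.opendaylight.controller.cluster.access.ABIVersion;
    import org.opendaylight.controller.cluster.access.AbstractVersionException;

    final class VersionNegotiationSketch {
        static ABIVersion negotiate(final short advertised) {
            final ABIVersion remote;
            try {
                remote = ABIVersion.valueOf(advertised);
            } catch (AbstractVersionException e) {
                // PastVersionException and FutureVersionException now expose closestVersion()
                // in place of the former getClosestVersion() accessor.
                return e.closestVersion();
            }
            // lt() is the new convenience: never speak a newer dialect than we implement.
            return remote.lt(ABIVersion.current()) ? remote : ABIVersion.current();
        }
    }
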
index 0567ef119110f2841ebf6882edeb2a2a47578bb6..1555b59501b586f4ab812b8a595001d4c4f33e1e 100644 (file)
@@ -9,17 +9,15 @@ package org.opendaylight.controller.cluster.access;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Abstract base exception used for reporting version mismatches from {@link ABIVersion}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class AbstractVersionException extends Exception {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull ABIVersion closestVersion;
     private final int version;
 
@@ -34,7 +32,7 @@ public abstract class AbstractVersionException extends Exception {
      *
      * @return Numeric version
      */
-    public final int getVersion() {
+    public final int version() {
         return version;
     }
 
@@ -43,8 +41,7 @@ public abstract class AbstractVersionException extends Exception {
      *
      * @return Closest supported {@link ABIVersion}
      */
-    public final @NonNull ABIVersion getClosestVersion() {
+    public final @NonNull ABIVersion closestVersion() {
         return closestVersion;
     }
-
 }
index d5f132a7b8e73d74e5616d9bd9bf3d61a29eeb45..f0ceaa4890b61dbc38493075be9cbf7e3fc74213 100644 (file)
@@ -7,19 +7,15 @@
  */
 package org.opendaylight.controller.cluster.access;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too new to be supported
  * by the codebase.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FutureVersionException extends AbstractVersionException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    FutureVersionException(final short version, ABIVersion closest) {
+    FutureVersionException(final short version, final ABIVersion closest) {
         super("Version " + Short.toUnsignedInt(version) + " is too new", version, closest);
     }
 }
index c8cbe54b3d0ae9bf725dee6114f349fd00b671d2..c333d3495e163e494c4daa28480e058c0c3a38af 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too old and no longer
  * supported by the codebase.
- *
- * @author Robert Varga
  */
-@Beta
 public final class PastVersionException extends AbstractVersionException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     PastVersionException(final short version, final ABIVersion closest) {
index 3898ee22b3e2d5b3972fd78a5fdc52b9278c49b2..9e2998c5b2536a8ef3d406de7e5c5f90c203dab2 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Request to abort a local transaction. Since local transactions do not introduce state on the backend until they
  * are ready, the purpose of this message is to inform the backend that a message identifier has been used. This is
  * not important for single transactions, but is critical to ensure transaction ordering within local histories.
- *
- * @author Robert Varga
  */
-@Beta
 public final class AbortLocalTransactionRequest extends AbstractLocalTransactionRequest<AbortLocalTransactionRequest> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public AbortLocalTransactionRequest(final @NonNull TransactionIdentifier identifier,
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java
deleted file mode 100644 (file)
index f6ba2e7..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link LocalHistoryRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractLocalHistoryRequestProxy<T extends LocalHistoryRequest<T>>
-        extends AbstractRequestProxy<LocalHistoryIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractLocalHistoryRequestProxy() {
-        // For Externalizable
-    }
-
-    AbstractLocalHistoryRequestProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-}
index efc0e856b20c4d7ed8ebeb7f49dc9460bfa28767..5831e65c11a88aabea03d78ee595635a4490097e 100644 (file)
@@ -22,6 +22,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  */
 public abstract class AbstractLocalTransactionRequest<T extends AbstractLocalTransactionRequest<T>>
         extends TransactionRequest<T> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     AbstractLocalTransactionRequest(final TransactionIdentifier identifier, final long sequence,
@@ -30,7 +31,7 @@ public abstract class AbstractLocalTransactionRequest<T extends AbstractLocalTra
     }
 
     @Override
-    protected final AbstractTransactionRequestProxy<T> externalizableProxy(final ABIVersion version) {
+    protected final SerialForm<T> externalizableProxy(final ABIVersion version) {
         throw new UnsupportedOperationException("Local transaction request " + this + " should never be serialized");
     }
 
index fa53a599aa1cbfa32dbdaac87b1a54339ee6cff9..2b4ee0e7e8dd2ef8ffaa7a2e9b69a1783668b257 100644 (file)
@@ -10,12 +10,15 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 
 /**
  * Abstract base class for {@link TransactionRequest}s accessing data as visible in the isolated context of a particular
@@ -25,13 +28,33 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * This class is visible outside of this package for the purpose of allowing common instanceof checks
  * and simplified codepaths.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class AbstractReadPathTransactionRequest<T extends AbstractReadPathTransactionRequest<T>>
         extends AbstractReadTransactionRequest<T> {
+    interface SerialForm<T extends AbstractReadPathTransactionRequest<T>>
+            extends AbstractReadTransactionRequest.SerialForm<T> {
+
+        @Override
+        default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+                final ActorRef replyTo, final boolean snapshotOnly) throws IOException {
+            return readExternal(in, target, sequence, replyTo, snapshotOnly,
+                NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier());
+        }
+
+        @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+            @NonNull ActorRef replyTo, boolean snapshotOnly, @NonNull YangInstanceIdentifier path) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+            AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+            try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                nnout.writeYangInstanceIdentifier(msg.getPath());
+            }
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull YangInstanceIdentifier path;
@@ -57,5 +80,5 @@ public abstract class AbstractReadPathTransactionRequest<T extends AbstractReadP
     }
 
     @Override
-    protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 2cd4ec9..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadPathTransactionRequestProxyV1<T extends AbstractReadPathTransactionRequest<T>>
-        extends AbstractReadTransactionRequestProxyV1<T> {
-    private static final long serialVersionUID = 1L;
-
-    private YangInstanceIdentifier path;
-    private transient NormalizedNodeStreamVersion streamVersion;
-
-    protected AbstractReadPathTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    AbstractReadPathTransactionRequestProxyV1(final T request) {
-        super(request);
-        path = request.getPath();
-        streamVersion = request.getVersion().getStreamVersion();
-    }
-
-    @Override
-    public final void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
-            nnout.writeYangInstanceIdentifier(path);
-        }
-    }
-
-    @Override
-    public final void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        path = NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier();
-    }
-
-    @Override
-    protected final T createReadRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final boolean snapshotOnly) {
-        return createReadPathRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-
-    abstract T createReadPathRequest(TransactionIdentifier target, long sequence, ActorRef replyTo,
-            YangInstanceIdentifier requestPath, boolean snapshotOnly);
-}
index 3fc4821edf99e64eed0feec7b983b1015fc86329..23fdd85140db711919672496df7cb10745af7c4a 100644 (file)
@@ -8,8 +8,11 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -20,13 +23,28 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * This class is visible outside of this package for the purpose of allowing common instanceof checks
  * and simplified codepaths.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class AbstractReadTransactionRequest<T extends AbstractReadTransactionRequest<T>>
         extends TransactionRequest<T> {
+    interface SerialForm<T extends AbstractReadTransactionRequest<T>> extends TransactionRequest.SerialForm<T> {
+        @Override
+        default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+                final ActorRef replyTo) throws IOException {
+            return readExternal(in, target, sequence, replyTo, in.readBoolean());
+        }
+
+        @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+            @NonNull ActorRef replyTo, boolean snapshotOnly) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+            out.writeBoolean(msg.isSnapshotOnly());
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final boolean snapshotOnly;
@@ -52,5 +70,5 @@ public abstract class AbstractReadTransactionRequest<T extends AbstractReadTrans
     }
 
     @Override
-    protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
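
The per-message proxy classes are being folded into nested SerialForm interfaces whose default methods chain to their parent via the Interface.super syntax: the base form handles the common header and each level appends its own field, such as snapshotOnly above. A small self-contained illustration of that chaining, using hypothetical Base/Read interfaces rather than the real hierarchy:

    import java.io.IOException;
    import java.io.ObjectOutput;

    final class SerialFormChainingSketch {
        interface BaseForm<T> {
            default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
                out.writeLong(42L); // stand-in for the common header (target, sequence, ...)
            }
        }

        interface ReadForm<T> extends BaseForm<T> {
            boolean snapshotOnly(T msg);

            @Override
            default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
                // Invoke the parent default first, then append this level's field, mirroring
                // how AbstractReadTransactionRequest.SerialForm appends snapshotOnly.
                BaseForm.super.writeExternal(out, msg);
                out.writeBoolean(snapshotOnly(msg));
            }
        }
    }
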
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 88820bd..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadTransactionRequestProxyV1<T extends AbstractReadTransactionRequest<T>>
-        extends AbstractTransactionRequestProxy<T> {
-    private static final long serialVersionUID = 1L;
-    private boolean snapshotOnly;
-
-    protected AbstractReadTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    AbstractReadTransactionRequestProxyV1(final T request) {
-        super(request);
-        snapshotOnly = request.isSnapshotOnly();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeBoolean(snapshotOnly);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        snapshotOnly = in.readBoolean();
-    }
-
-    @Override
-    protected final T createRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
-        return createReadRequest(target, sequence, replyTo, snapshotOnly);
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract T createReadRequest(TransactionIdentifier target, long sequence, ActorRef replyTo, boolean snapshotOnly);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java
deleted file mode 100644 (file)
index 437d281..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionRequestProxy<T extends TransactionRequest<T>>
-        extends AbstractRequestProxy<TransactionIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractTransactionRequestProxy() {
-        // For Externalizable
-    }
-
-    AbstractTransactionRequestProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java
deleted file mode 100644 (file)
index a1284b7..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionSuccess}es.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionSuccessProxy<T extends TransactionSuccess<T>>
-        extends AbstractSuccessProxy<TransactionIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractTransactionSuccessProxy() {
-        // For Externalizable
-    }
-
-    AbstractTransactionSuccessProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java
new file mode 100644 (file)
index 0000000..ea9c37e
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCF implements ConnectClientFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCF() {
+        // for Externalizable
+    }
+
+    CCF(final ConnectClientFailure request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java
new file mode 100644 (file)
index 0000000..ace94d5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCR implements ConnectClientRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCR() {
+        // for Externalizable
+    }
+
+    CCR(final ConnectClientRequest request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java
new file mode 100644 (file)
index 0000000..ea425e5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCS implements ConnectClientSuccess.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCS() {
+        // for Externalizable
+    }
+
+    CCS(final ConnectClientSuccess request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java
new file mode 100644 (file)
index 0000000..da3fd13
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class CHR implements CreateLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private CreateLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CHR() {
+        // For Externalizable
+    }
+
+    CHR(final CreateLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public CreateLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final CreateLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
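
CCF, CCR, CCS and CHR all share one serialization-proxy shape: the proxy carries only the message, the stream only ever sees the proxy, and readResolve() hands the reconstructed message back to the reader. A self-contained sketch of that round trip with a hypothetical Message/Proxy pair (the real proxies obtain their writeExternal/readExternal bodies from the SerialForm default methods instead):

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    final class ProxyPatternSketch {
        // Hypothetical message type standing in for ConnectClientFailure and friends.
        static final class Message implements Serializable {
            @java.io.Serial
            private static final long serialVersionUID = 1L;

            final String payload;

            Message(final String payload) {
                this.payload = payload;
            }

            // The message itself is never written; the stream carries its proxy instead.
            @java.io.Serial
            private Object writeReplace() {
                return new Proxy(this);
            }
        }

        // Mirrors the CCF/CCR/CCS/CHR shape: hold the message, write and read its fields,
        // then resolve back to the message on the way in.
        static final class Proxy implements Externalizable {
            @java.io.Serial
            private static final long serialVersionUID = 1L;

            private Message message;

            @SuppressWarnings("checkstyle:RedundantModifier")
            public Proxy() {
                // for Externalizable
            }

            Proxy(final Message message) {
                this.message = message;
            }

            @Override
            public void writeExternal(final ObjectOutput out) throws IOException {
                out.writeUTF(message.payload);
            }

            @Override
            public void readExternal(final ObjectInput in) throws IOException {
                message = new Message(in.readUTF());
            }

            @java.io.Serial
            private Object readResolve() {
                return message;
            }
        }
    }
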
index ece472056473df653610cb9c51fe1e9918c28c55..67b1a40408f39067e6ec42b6bc99695d1177ee05 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
@@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  * been closed, either via a successful commit or abort (which is indicated via {@link #isSuccessful()}). This can
  * happen if the corresponding journal record is replicated, but the message to the frontend gets lost and the backend
  * leader moved before the frontend retried the corresponding request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ClosedTransactionException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final boolean successful;
index adef0c31bc022775889ad6d123954f6be955758d..7a3f771b474789bfe576a9a7465a9eacd4e55ad6 100644 (file)
@@ -10,29 +10,26 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Request to commit a local transaction. Since local transactions do not introduce state on the backend until they
  * are ready, this message carries a complete set of modifications.
- *
- * @author Robert Varga
  */
-@Beta
 public final class CommitLocalTransactionRequest
         extends AbstractLocalTransactionRequest<CommitLocalTransactionRequest> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
     private final DataTreeModification mod;
     private final Exception delayedFailure;
     private final boolean coordinated;
@@ -77,4 +74,19 @@ public final class CommitLocalTransactionRequest
         return super.addToStringAttributes(toStringHelper).add("coordinated", coordinated)
                 .add("delayedError", delayedFailure);
     }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
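
The three @java.io.Serial methods added above act as a guard rather than a serialization path: the request holds a non-serializable DataTreeModification, so any attempt to push it through plain Java serialization has to fail fast. A standalone sketch of the same guard; throwNSE() is inherited from the message base class in the real code and is assumed here to simply throw NotSerializableException:

    import java.io.IOException;
    import java.io.NotSerializableException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.ObjectStreamException;
    import java.io.Serializable;

    final class NeverSerializedSketch implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        // Local stand-in for the inherited throwNSE() helper.
        private void throwNSE() throws NotSerializableException {
            throw new NotSerializableException(getClass().getName());
        }

        @java.io.Serial
        private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
            throwNSE();
        }

        @java.io.Serial
        private void readObjectNoData() throws ObjectStreamException {
            throwNSE();
        }

        @java.io.Serial
        private void writeObject(final ObjectOutputStream stream) throws IOException {
            throwNSE();
        }
    }
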
index 46b460ac0d364ec137113fcb65cea599ac724058..1157d1b6f888fc596e1cd42dcd2ca9796a889348 100644 (file)
@@ -7,20 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 
 /**
  * A {@link RequestFailure} reported when {@link ConnectClientRequest} fails.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientFailure extends RequestFailure<ClientIdentifier, ConnectClientFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<ClientIdentifier, ConnectClientFailure> {
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new ConnectClientFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     ConnectClientFailure(final ClientIdentifier target, final long sequence, final RequestException cause) {
@@ -32,9 +43,8 @@ public final class ConnectClientFailure extends RequestFailure<ClientIdentifier,
     }
 
     @Override
-    protected AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> externalizableProxy(
-            final ABIVersion version) {
-        return new ConnectClientFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCF(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java
deleted file mode 100644 (file)
index 55efb28..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Serialization proxy for use with {@link ConnectClientFailure}. This class implements initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientFailureProxyV1 extends AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> {
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientFailureProxyV1() {
-        // For Externalizable
-    }
-
-    ConnectClientFailureProxyV1(final ConnectClientFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new ConnectClientFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
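
The overall shape of this refactor is visible in the hunks above: the standalone *ProxyV1 Externalizable classes go away, each message gains a nested SerialForm interface with default read/write methods, and a short-named proxy (CCF, CCR, CCS, ...) carries the message across Java serialization. The sketch below is a minimal, self-contained illustration of that writeReplace()/readResolve() round trip using hypothetical ExampleMessage/ExampleProxy/ProxyRoundTrip names and plain java.io serialization; it is not the cds-access-api code itself (which routes through externalizableProxy() and Externalizable), just the underlying serialization-proxy idiom, assuming the base message class supplies the writeReplace() hook.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Hypothetical message: never serialized directly, always via its proxy.
final class ExampleMessage implements Serializable {
    private static final long serialVersionUID = 1L;

    final String target;
    final long sequence;

    ExampleMessage(final String target, final long sequence) {
        this.target = target;
        this.sequence = sequence;
    }

    // Serialization always goes through the compact proxy form.
    private Object writeReplace() {
        return new ExampleProxy(this);
    }
}

// Hypothetical analogue of the short-named proxies (CCF, DHR, ...).
final class ExampleProxy implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String target;
    private final long sequence;

    ExampleProxy(final ExampleMessage message) {
        target = message.target;
        sequence = message.sequence;
    }

    // Deserialization resolves back to the real message instance.
    private Object readResolve() {
        return new ExampleMessage(target, sequence);
    }
}

final class ProxyRoundTrip {
    public static void main(final String[] args) throws Exception {
        final var original = new ExampleMessage("member-1-frontend", 42);

        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            oos.writeObject(original);                           // writeReplace() emits ExampleProxy
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            final var copy = (ExampleMessage) ois.readObject();  // readResolve() rebuilds the message
            System.out.println(copy.target + " / " + copy.sequence);
        }
    }
}
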
index ba86035e920a2356424bbdc37050d0eb2771907c..953fafefa89078871c7c207100199f5779af6f22 100644 (file)
@@ -10,10 +10,12 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -26,11 +28,30 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  *
  * <p>
  * It also includes request stream sequencing information.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientRequest extends Request<ClientIdentifier, ConnectClientRequest> {
+    interface SerialForm extends Request.SerialForm<ClientIdentifier, ConnectClientRequest> {
+        @Override
+        default ConnectClientRequest readExternal(final ObjectInput in, final ClientIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+            return new ConnectClientRequest(target, sequence, replyTo, ABIVersion.inexactReadFrom(in),
+                ABIVersion.inexactReadFrom(in));
+        }
+
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ConnectClientRequest msg) throws IOException {
+            Request.SerialForm.super.writeExternal(out, msg);
+            msg.getMinVersion().writeTo(out);
+            msg.getMaxVersion().writeTo(out);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final ABIVersion minVersion;
@@ -50,8 +71,8 @@ public final class ConnectClientRequest extends Request<ClientIdentifier, Connec
 
     private ConnectClientRequest(final ConnectClientRequest request, final ABIVersion version) {
         super(request, version);
-        this.minVersion = request.minVersion;
-        this.maxVersion = request.maxVersion;
+        minVersion = request.minVersion;
+        maxVersion = request.maxVersion;
     }
 
     public ABIVersion getMinVersion() {
@@ -68,9 +89,8 @@ public final class ConnectClientRequest extends Request<ClientIdentifier, Connec
     }
 
     @Override
-    protected AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new ConnectClientRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java
deleted file mode 100644 (file)
index da3a601..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientRequestProxyV1 extends AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> {
-    private ABIVersion minVersion;
-    private ABIVersion maxVersion;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientRequestProxyV1() {
-        // for Externalizable
-    }
-
-    ConnectClientRequestProxyV1(final ConnectClientRequest request) {
-        super(request);
-        this.minVersion = request.getMinVersion();
-        this.maxVersion = request.getMaxVersion();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        minVersion.writeTo(out);
-        maxVersion.writeTo(out);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        minVersion = ABIVersion.inexactReadFrom(in);
-        maxVersion = ABIVersion.inexactReadFrom(in);
-    }
-
-    @Override
-    protected ConnectClientRequest createRequest(final ClientIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new ConnectClientRequest(target, sequence, replyTo, minVersion, maxVersion);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
index 43fdb3c3c26139c657f358e6eaa9bed6a74e0162..ad0e3624e1f1aa05969fa850c8b5b4f897316458 100644 (file)
@@ -12,39 +12,84 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Successful reply to a {@link ConnectClientRequest}. The client actor which initiated this connection should use
  * the version reported via {@link #getVersion()} of this message to communicate with this backend. Should this backend
  * fail, the client can try accessing the provided alternates.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier, ConnectClientSuccess> {
-    private static final long serialVersionUID = 1L;
+    interface SerialForm extends RequestSuccess.SerialForm<ClientIdentifier, ConnectClientSuccess> {
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default ConnectClientSuccess readExternal(final ObjectInput in, final ClientIdentifier target,
+                final long sequence) throws IOException, ClassNotFoundException {
+            final var backend = JavaSerializer.currentSystem().value().provider()
+                .resolveActorRef((String) in.readObject());
+            final var maxMessages = in.readInt();
+
+            final int alternatesSize = in.readInt();
+            final var alternates = new ArrayList<ActorSelection>(alternatesSize);
+            for (int i = 0; i < alternatesSize; ++i) {
+                alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
+            }
+
+            return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null);
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ConnectClientSuccess msg) throws IOException {
+            out.writeObject(Serialization.serializedActorPath(msg.backend));
+            out.writeInt(msg.maxMessages);
+
+            out.writeInt(msg.alternates.size());
+            for (ActorSelection b : msg.alternates) {
+                out.writeObject(b.toSerializationFormat());
+            }
+
+            // We are ignoring the DataTree, it is not serializable anyway
+        }
+    }
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
-    private final @NonNull List<ActorSelection> alternates;
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See justification above.")
+    private final @NonNull ImmutableList<ActorSelection> alternates;
     private final ReadOnlyDataTree dataTree;
     private final @NonNull ActorRef backend;
     private final int maxMessages;
 
+    private ConnectClientSuccess(final ConnectClientSuccess success, final ABIVersion version) {
+        super(success, version);
+        alternates = success.alternates;
+        dataTree = success.dataTree;
+        backend = success.backend;
+        maxMessages = success.maxMessages;
+    }
+
     ConnectClientSuccess(final ClientIdentifier target, final long sequence, final ActorRef backend,
         final List<ActorSelection> alternates, final int maxMessages, final ReadOnlyDataTree dataTree) {
         super(target, sequence);
@@ -83,13 +128,13 @@ public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier,
     }
 
     @Override
-    protected ConnectClientSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ConnectClientSuccessProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCS(this);
     }
 
     @Override
     protected ConnectClientSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ConnectClientSuccess(this, version);
     }
 
     @Override
@@ -97,4 +142,19 @@ public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier,
         return super.addToStringAttributes(toStringHelper).add("alternates", alternates)
                 .add("dataTree present", getDataTree().isPresent()).add("maxMessages", maxMessages);
     }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
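
The readObject()/readObjectNoData()/writeObject() methods added above are guards: they presumably delegate to a shared throwNSE() helper that raises NotSerializableException, so ConnectClientSuccess can only ever cross the wire through its SerialForm proxy. Below is a minimal, self-contained sketch of that guard trio, with a hypothetical GuardedMessage class and an assumed NotSerializableException-throwing throwNSE(); the real message classes additionally define the writeReplace() hook, so their normal serialization still succeeds via the proxy.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.NotSerializableException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;

// Hypothetical message carrying only the guard trio, to show the failure path in isolation.
final class GuardedMessage implements Serializable {
    private static final long serialVersionUID = 1L;

    private void throwNSE() throws NotSerializableException {
        // Assumed behaviour of the shared throwNSE() helper.
        throw new NotSerializableException(getClass().getName());
    }

    private void writeObject(final ObjectOutputStream stream) throws IOException {
        throwNSE();   // refuse direct serialization of the message
    }

    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
        throwNSE();   // refuse direct deserialization
    }

    private void readObjectNoData() throws ObjectStreamException {
        throwNSE();   // refuse field-less stream data as well
    }

    public static void main(final String[] args) throws IOException {
        try (var oos = new ObjectOutputStream(new ByteArrayOutputStream())) {
            oos.writeObject(new GuardedMessage());
        } catch (NotSerializableException e) {
            System.out.println("direct serialization rejected: " + e.getMessage());
        }
    }
}
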
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java
deleted file mode 100644 (file)
index 8dd40ac..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientSuccessProxyV1 extends AbstractSuccessProxy<ClientIdentifier, ConnectClientSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    private List<ActorSelection> alternates;
-    private ActorRef backend;
-    private int maxMessages;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ConnectClientSuccessProxyV1(final ConnectClientSuccess success) {
-        super(success);
-        this.alternates = success.getAlternates();
-        this.backend = success.getBackend();
-        this.maxMessages = success.getMaxMessages();
-        // We are ignoring the DataTree, it is not serializable anyway
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        out.writeObject(Serialization.serializedActorPath(backend));
-        out.writeInt(maxMessages);
-
-        out.writeInt(alternates.size());
-        for (ActorSelection b : alternates) {
-            out.writeObject(b.toSerializationFormat());
-        }
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        backend = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
-        maxMessages = in.readInt();
-
-        final int alternatesSize = in.readInt();
-        alternates = new ArrayList<>(alternatesSize);
-        for (int i = 0; i < alternatesSize; ++i) {
-            alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
-        }
-    }
-
-    @Override
-    protected ConnectClientSuccess createSuccess(final ClientIdentifier target, final long sequence) {
-        return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
index 01a110d046e16aed03d68ff355efec133a9d7453..b627bafa438cf005f723146fb316c91a3a0b1e90 100644 (file)
@@ -8,17 +8,23 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to create a new local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class CreateLocalHistoryRequest extends LocalHistoryRequest<CreateLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<CreateLocalHistoryRequest> {
+        @Override
+        default CreateLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new CreateLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CreateLocalHistoryRequest(final LocalHistoryIdentifier target, final ActorRef replyTo) {
@@ -34,9 +40,8 @@ public final class CreateLocalHistoryRequest extends LocalHistoryRequest<CreateL
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new CreateLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index b61c9f5..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class CreateLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public CreateLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    CreateLocalHistoryRequestProxyV1(final CreateLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected CreateLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new CreateLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java
new file mode 100644 (file)
index 0000000..ebd0f02
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class DHR implements DestroyLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DestroyLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DHR() {
+        // for Externalizable
+    }
+
+    DHR(final DestroyLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public DestroyLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final DestroyLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index 7df84763c5ed4fa491efe6dc62c5fca072c9b344..a91eb6971c3c4d2f173122e0dfb03b294dfe443e 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.collect.RangeSet;
 import com.google.common.primitives.UnsignedLong;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 /**
  * A {@link RequestException} indicating that the backend has received a request to create a history which has already
  * been retired.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DeadHistoryException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DeadHistoryException(final RangeSet<UnsignedLong> purgedHistories) {
index fee439984ac53c15fbdc37c09c618ba410bda150..0f259c1a947ba32d1b8bb1fe3f2dd02677e08357 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.collect.ImmutableRangeSet;
 import com.google.common.collect.RangeSet;
 import com.google.common.primitives.UnsignedLong;
@@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 /**
  * A {@link RequestException} indicating that the backend has received a request to create a transaction which has
  * already been purged.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DeadTransactionException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final RangeSet<UnsignedLong> purgedIdentifiers;
index 375128318c43cf2bdf11395fd20ec95f0c42285a..5add5eb05451e9df6002b82772576780b3476abe 100644 (file)
@@ -8,17 +8,23 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to destroy a local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DestroyLocalHistoryRequest extends LocalHistoryRequest<DestroyLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<DestroyLocalHistoryRequest> {
+        @Override
+        default DestroyLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new DestroyLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DestroyLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence,
@@ -31,9 +37,8 @@ public final class DestroyLocalHistoryRequest extends LocalHistoryRequest<Destro
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new DestroyLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new DHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index 0ebd690..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class DestroyLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public DestroyLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    DestroyLocalHistoryRequestProxyV1(final DestroyLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected DestroyLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new DestroyLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java
new file mode 100644 (file)
index 0000000..26964e4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETR implements ExistsTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ExistsTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ETR() {
+        // for Externalizable
+    }
+
+    ETR(final ExistsTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ExistsTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ExistsTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java
new file mode 100644 (file)
index 0000000..ad8564b
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETS implements TransactionSuccess.SerialForm<ExistsTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ExistsTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ETS() {
+        // for Externalizable
+    }
+
+    ETS(final ExistsTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ExistsTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ExistsTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out, final ExistsTransactionSuccess msg) throws IOException {
+        out.writeBoolean(msg.getExists());
+    }
+
+    @Override
+    public ExistsTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new ExistsTransactionSuccess(target, sequence, in.readBoolean());
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index ab2316c28edb63eb65b9f3d48b9cc8a9b99d869f..06c2797ca41d8d7bd6f4e745902aa7e553859e55 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,15 +17,22 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * A transaction request to query if a particular path exists in the current view of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ExistsTransactionRequest extends AbstractReadPathTransactionRequest<ExistsTransactionRequest> {
+    interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ExistsTransactionRequest> {
+        @Override
+        default ExistsTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+                throws IOException {
+            return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ExistsTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
-            final @NonNull  ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
+            final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
         super(identifier, sequence, replyTo, path, snapshotOnly);
     }
 
@@ -38,7 +46,7 @@ public final class ExistsTransactionRequest extends AbstractReadPathTransactionR
     }
 
     @Override
-    protected ExistsTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ExistsTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new ETR(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 2429947..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionRequestProxyV1 extends
-        AbstractReadPathTransactionRequestProxyV1<ExistsTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ExistsTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ExistsTransactionRequestProxyV1(final ExistsTransactionRequest request) {
-        super(request);
-    }
-
-    @Override
-    ExistsTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
-        return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-}
index 8a1704de763725d3e2aaa0a2aad8baba43c00f36..72dee3aefa365fd4a4fbee999e786574e4db6a31 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -15,14 +14,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 /**
  * Successful reply to an {@link ExistsTransactionRequest}. It indicates the presence of the requested data via
  * {@link #getExists()}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ExistsTransactionSuccess extends TransactionSuccess<ExistsTransactionSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final boolean exists;
 
+    private ExistsTransactionSuccess(final ExistsTransactionSuccess success, final ABIVersion version) {
+        super(success, version);
+        exists = success.exists;
+    }
+
     public ExistsTransactionSuccess(final TransactionIdentifier target, final long sequence, final boolean exists) {
         super(target, sequence);
         this.exists = exists;
@@ -33,13 +36,13 @@ public final class ExistsTransactionSuccess extends TransactionSuccess<ExistsTra
     }
 
     @Override
-    protected ExistsTransactionSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ExistsTransactionSuccessProxyV1(this);
+    protected ETS externalizableProxy(final ABIVersion version) {
+        return new ETS(this);
     }
 
     @Override
     protected ExistsTransactionSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ExistsTransactionSuccess(this, version);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index 24de176..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ExistsTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-    private boolean exists;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ExistsTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ExistsTransactionSuccessProxyV1(final ExistsTransactionSuccess request) {
-        super(request);
-        this.exists = request.getExists();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeBoolean(exists);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        exists = in.readBoolean();
-    }
-
-    @Override
-    protected ExistsTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ExistsTransactionSuccess(target, sequence, exists);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java
new file mode 100644 (file)
index 0000000..68e9b09
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class HF implements LocalHistoryFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HF() {
+        // for Externalizable
+    }
+
+    HF(final LocalHistoryFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public LocalHistoryFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final LocalHistoryFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java
new file mode 100644 (file)
index 0000000..4ab0ff5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistorySuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class HS implements LocalHistorySuccess.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistorySuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HS() {
+        // for Externalizable
+    }
+
+    HS(final LocalHistorySuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public LocalHistorySuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final LocalHistorySuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java
new file mode 100644 (file)
index 0000000..ef76f5e
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceRequest}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSR implements IncrementTransactionSequenceRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private IncrementTransactionSequenceRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ITSR() {
+        // for Externalizable
+    }
+
+    ITSR(final IncrementTransactionSequenceRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final IncrementTransactionSequenceRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java
new file mode 100644 (file)
index 0000000..7252d58
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSS implements TransactionSuccess.SerialForm<IncrementTransactionSequenceSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private IncrementTransactionSequenceSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ITSS() {
+        // for Externalizable
+    }
+
+    ITSS(final IncrementTransactionSequenceSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final IncrementTransactionSequenceSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceSuccess readExternal(final ObjectInput it, final TransactionIdentifier target,
+            final long sequence) {
+        return new IncrementTransactionSequenceSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index ffc0a68b8912481bfeca882545ff3626469025e0..5695860e17cd7b0ba48d3dada038baf63c7620d8 100644 (file)
@@ -7,28 +7,55 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static com.google.common.base.Preconditions.checkArgument;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * A blank transaction request. This is used to provide backfill requests in converted retransmit scenarios, such as
  * when an initial request to a transaction (such as a {@link ReadTransactionRequest}) is satisfied by the backend
  * before the need to replay the transaction to a different remote backend.
- *
- * @author Robert Varga
  */
 public final class IncrementTransactionSequenceRequest extends
         AbstractReadTransactionRequest<IncrementTransactionSequenceRequest> {
+    interface SerialForm extends AbstractReadTransactionRequest.SerialForm<IncrementTransactionSequenceRequest> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final IncrementTransactionSequenceRequest msg)
+                throws IOException {
+            AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+            WritableObjects.writeLong(out, msg.getIncrement());
+        }
+
+        @Override
+        default IncrementTransactionSequenceRequest readExternal(final ObjectInput in,
+                final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
+                final boolean snapshotOnly) throws IOException {
+            return new IncrementTransactionSequenceRequest(target, sequence, replyTo, snapshotOnly,
+                WritableObjects.readLong(in));
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long increment;
 
+    public IncrementTransactionSequenceRequest(final IncrementTransactionSequenceRequest request,
+            final ABIVersion version) {
+        super(request, version);
+        increment = request.increment;
+    }
+
     public IncrementTransactionSequenceRequest(final TransactionIdentifier identifier, final long sequence,
             final ActorRef replyTo, final boolean snapshotOnly, final long increment) {
         super(identifier, sequence, replyTo, snapshotOnly);
-        Preconditions.checkArgument(increment >= 0);
+        checkArgument(increment >= 0, "Unexpected increment %s", increment);
         this.increment = increment;
     }
 
@@ -42,12 +69,12 @@ public final class IncrementTransactionSequenceRequest extends
     }
 
     @Override
-    protected IncrementTransactionSequenceRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new IncrementTransactionSequenceRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new ITSR(this);
     }
 
     @Override
     protected IncrementTransactionSequenceRequest cloneAsVersion(final ABIVersion targetVersion) {
-        return this;
+        return new IncrementTransactionSequenceRequest(this, targetVersion);
     }
 }
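
The increment is encoded with the WritableObjects helpers from yangtools, which write a long in a compact, variable-length form instead of a fixed eight bytes. A minimal round-trip sketch of that encoding, using a hypothetical IncrementCoding class and assuming org.opendaylight.yangtools.concepts.WritableObjects is on the classpath (as it is for cds-access-api), could look like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.yangtools.concepts.WritableObjects;

final class IncrementCoding {
    public static void main(final String[] args) throws IOException {
        final long increment = 5;

        // Write the long in the same compact form the SerialForm uses ...
        final var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            WritableObjects.writeLong(out, increment);
        }

        // ... and read it back, as readExternal() does.
        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(WritableObjects.readLong(in));   // prints 5
        }
    }
}
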
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java
deleted file mode 100644 (file)
index da1659e..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-final class IncrementTransactionSequenceRequestProxyV1
-        extends AbstractReadTransactionRequestProxyV1<IncrementTransactionSequenceRequest> {
-    private long increment;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public IncrementTransactionSequenceRequestProxyV1() {
-        // For Externalizable
-    }
-
-    IncrementTransactionSequenceRequestProxyV1(final IncrementTransactionSequenceRequest request) {
-        super(request);
-        this.increment = request.getIncrement();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        WritableObjects.writeLong(out, increment);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        increment = WritableObjects.readLong(in);
-    }
-
-    @Override
-    IncrementTransactionSequenceRequest createReadRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyToActor, final boolean snapshotOnly) {
-        return new IncrementTransactionSequenceRequest(target, sequence, replyToActor, snapshotOnly, increment);
-    }
-}
index 80f4a0d5aab941bf64c8d6a868c13cc70cdeda15..4f27f76563f7649594e067799580f2c1ea60d0bc 100644 (file)
@@ -7,30 +7,32 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * Successful reply to an {@link IncrementTransactionSequenceRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class IncrementTransactionSequenceSuccess extends TransactionSuccess<IncrementTransactionSequenceSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private IncrementTransactionSequenceSuccess(final IncrementTransactionSequenceSuccess success,
+            final ABIVersion version) {
+        super(success, version);
+    }
+
     public IncrementTransactionSequenceSuccess(final TransactionIdentifier target, final long sequence) {
         super(target, sequence);
     }
 
     @Override
-    protected IncrementTransactionSequenceSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new IncrementTransactionSequenceSuccessProxyV1(this);
+    protected ITSS externalizableProxy(final ABIVersion version) {
+        return new ITSS(this);
     }
 
     @Override
     protected IncrementTransactionSequenceSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new IncrementTransactionSequenceSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java
deleted file mode 100644 (file)
index a99faab..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class IncrementTransactionSequenceSuccessProxyV1
-        extends AbstractTransactionSuccessProxy<IncrementTransactionSequenceSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public IncrementTransactionSequenceSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    IncrementTransactionSequenceSuccessProxyV1(final IncrementTransactionSequenceSuccess request) {
-        super(request);
-    }
-
-    @Override
-    protected IncrementTransactionSequenceSuccess createSuccess(final TransactionIdentifier target,
-            final long sequence) {
-        return new IncrementTransactionSequenceSuccess(target, sequence);
-    }
-}
index 4fd69c24cee0e2ec2948f768975683a683038faa..fc24d8aedacbf3bf5105d7ecb6b9b7f836f6dcc2 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 
 /**
  * Generic {@link RequestFailure} involving a {@link LocalHistoryRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class LocalHistoryFailure extends RequestFailure<LocalHistoryIdentifier, LocalHistoryFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<LocalHistoryIdentifier, LocalHistoryFailure> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
+
+        @Override
+        default LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new LocalHistoryFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private LocalHistoryFailure(final LocalHistoryFailure failure, final ABIVersion version) {
+        super(failure, version);
+    }
+
     LocalHistoryFailure(final LocalHistoryIdentifier target, final long sequence, final RequestException cause) {
         super(target, sequence, cause);
     }
 
     @Override
-    protected LocalHistoryFailure cloneAsVersion(final ABIVersion version) {
-        return this;
+    protected LocalHistoryFailure cloneAsVersion(final ABIVersion targetVersion) {
+        return new LocalHistoryFailure(this, targetVersion);
     }
 
     @Override
-    protected LocalHistoryFailureProxyV1 externalizableProxy(final ABIVersion version) {
-        return new LocalHistoryFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new HF(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java
deleted file mode 100644 (file)
index 0d3a687..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class LocalHistoryFailureProxyV1 extends
-        AbstractRequestFailureProxy<LocalHistoryIdentifier, LocalHistoryFailure> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public LocalHistoryFailureProxyV1() {
-        // For Externalizable
-    }
-
-    LocalHistoryFailureProxyV1(final LocalHistoryFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new LocalHistoryFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-}
index 33d04850fb91d01a524a7f17ffc12820445e392a..c304384fd8b7089729c03ba8108aee1ba86c67f3 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.Preconditions;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Request;
@@ -19,12 +20,17 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  * Abstract base class for {@link Request}s involving specific local history. This class is visible outside of this
  * package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class LocalHistoryRequest<T extends LocalHistoryRequest<T>> extends Request<LocalHistoryIdentifier, T> {
+    interface SerialForm<T extends LocalHistoryRequest<T>> extends Request.SerialForm<LocalHistoryIdentifier, T> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     LocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
@@ -42,5 +48,5 @@ public abstract class LocalHistoryRequest<T extends LocalHistoryRequest<T>> exte
     }
 
     @Override
-    protected abstract AbstractLocalHistoryRequestProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
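
The nested SerialForm interfaces added throughout this patch keep the shared decoding steps in default methods, so each concrete proxy only has to say how its message is constructed. A self-contained sketch of that composition; Target, SerialFormSketch and PurgeFormSketch are illustrative stand-ins rather than the real cds-access-api types:

    import java.io.DataInput;
    import java.io.IOException;

    // Illustrative stand-in for LocalHistoryIdentifier/TransactionIdentifier.
    record Target(long historyId) {
        static Target readFrom(final DataInput in) throws IOException {
            return new Target(in.readLong());
        }
    }

    // Shared decoding behaviour lives in default methods...
    interface SerialFormSketch<T> {
        default Target readTarget(final DataInput in) throws IOException {
            return Target.readFrom(in);
        }

        T createMessage(Target target, long sequence);

        default T readMessage(final DataInput in) throws IOException {
            return createMessage(readTarget(in), in.readLong());
        }
    }

    // ...while a concrete form only maps the decoded fields onto its message.
    final class PurgeFormSketch implements SerialFormSketch<String> {
        @Override
        public String createMessage(final Target target, final long sequence) {
            return "purge history " + target.historyId() + " @ sequence " + sequence;
        }
    }
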
index 3b8ed35816ede5bb36a0e41ddaa57f7d74ae5971..7c0e1865c12585f75736d42dc6e54b8ffdd14e40 100644 (file)
@@ -7,37 +7,48 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 
 /**
  * Success class for {@link RequestSuccess}es involving a specific local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class LocalHistorySuccess extends RequestSuccess<LocalHistoryIdentifier, LocalHistorySuccess> {
-    private static final long serialVersionUID = 1L;
+    interface SerialForm extends RequestSuccess.SerialForm<LocalHistoryIdentifier, LocalHistorySuccess> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
 
-    public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
-        super(target, sequence);
+        @Override
+        default LocalHistorySuccess readExternal(final ObjectInput it, final LocalHistoryIdentifier target,
+                final long sequence) {
+            return new LocalHistorySuccess(target, sequence);
+        }
     }
 
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
     private LocalHistorySuccess(final LocalHistorySuccess success, final ABIVersion version) {
         super(success, version);
     }
 
+    public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
+        super(target, sequence);
+    }
+
     @Override
     protected LocalHistorySuccess cloneAsVersion(final ABIVersion version) {
         return new LocalHistorySuccess(this, version);
     }
 
     @Override
-    protected AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new LocalHistorySuccessProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new HS(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java
deleted file mode 100644 (file)
index 97a7a1c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Serialization proxy associated with {@link LocalHistorySuccess}.
- *
- * @author Robert Varga
- */
-final class LocalHistorySuccessProxyV1 extends AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public LocalHistorySuccessProxyV1() {
-        // For Externalizable
-    }
-
-    LocalHistorySuccessProxyV1(final LocalHistorySuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-
-    @Override
-    protected LocalHistorySuccess createSuccess(final LocalHistoryIdentifier target, final long sequence) {
-        return new LocalHistorySuccess(target, sequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java
new file mode 100644 (file)
index 0000000..a0c5acf
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTR implements ModifyTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ModifyTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MTR() {
+        // for Externalizable
+    }
+
+    MTR(final ModifyTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ModifyTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java
new file mode 100644 (file)
index 0000000..ee7e876
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTS implements TransactionSuccess.SerialForm<ModifyTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ModifyTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MTS() {
+        // for Externalizable
+    }
+
+    MTS(final ModifyTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ModifyTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new ModifyTransactionSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index 39b577cef26b67d9739fb32fe32b5d739a1c1908..b1ddd389306ac9ad5e38f8abbcd155dd91a1dec7 100644 (file)
@@ -8,33 +8,83 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
 
 /**
  * A transaction request to apply a particular set of operations on top of the current transaction. This message is
  * used to also finish a transaction by specifying a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ModifyTransactionRequest extends TransactionRequest<ModifyTransactionRequest>
         implements SliceableMessage {
+    interface SerialForm extends TransactionRequest.SerialForm<ModifyTransactionRequest> {
+
+
+        @Override
+        default ModifyTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+
+            final var protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
+            final int size = in.readInt();
+            final List<TransactionModification> modifications;
+            if (size != 0) {
+                modifications = new ArrayList<>(size);
+                final var nnin = NormalizedNodeDataInput.newDataInput(in);
+                final var writer = ReusableImmutableNormalizedNodeStreamWriter.create();
+                for (int i = 0; i < size; ++i) {
+                    modifications.add(TransactionModification.readFrom(nnin, writer));
+                }
+            } else {
+                modifications = ImmutableList.of();
+            }
+
+            return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ModifyTransactionRequest msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+            out.writeByte(PersistenceProtocol.byteValue(msg.getPersistenceProtocol().orElse(null)));
+
+            final var modifications = msg.getModifications();
+            out.writeInt(modifications.size());
+            if (!modifications.isEmpty()) {
+                try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                    for (var op : modifications) {
+                        op.writeTo(nnout);
+                    }
+                }
+            }
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
     private final List<TransactionModification> modifications;
     private final PersistenceProtocol protocol;
 
+    private ModifyTransactionRequest(final ModifyTransactionRequest request, final ABIVersion version) {
+        super(request, version);
+        modifications = request.modifications;
+        protocol = request.protocol;
+    }
+
     ModifyTransactionRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
         final List<TransactionModification> modifications, final PersistenceProtocol protocol) {
         super(target, sequence, replyTo);
@@ -57,12 +107,27 @@ public final class ModifyTransactionRequest extends TransactionRequest<ModifyTra
     }
 
     @Override
-    protected ModifyTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ModifyTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new MTR(this);
     }
 
     @Override
     protected ModifyTransactionRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ModifyTransactionRequest(this, version);
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
     }
 }
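
ModifyTransactionRequest.SerialForm above frames its payload as a persistence-protocol byte followed by a size-prefixed run of modifications, opening the NormalizedNode stream only when the list is non-empty. A minimal sketch of the same count-prefixed framing, using plain data streams and String entries in place of the real TransactionModification codec; all names here are illustrative:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    final class CountPrefixedFraming {
        static byte[] write(final byte protocol, final List<String> modifications) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                out.writeByte(protocol);            // 0 plays the role of "no protocol"
                out.writeInt(modifications.size()); // size prefix; 0 means no body follows
                for (var modification : modifications) {
                    out.writeUTF(modification);
                }
            }
            return bytes.toByteArray();
        }

        static List<String> read(final byte[] data) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(data))) {
                in.readByte();                      // protocol byte, ignored in this sketch
                final int size = in.readInt();
                final var modifications = new ArrayList<String>(size);
                for (int i = 0; i < size; ++i) {
                    modifications.add(in.readUTF());
                }
                return modifications;
            }
        }
    }
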
index b0a0cade210f76af53e68e7cbc2dbd6fbc7b269d..0e99942c1ee9655bb32ac996d9462ec346cefa8d 100644 (file)
@@ -11,25 +11,19 @@ import static com.google.common.base.Preconditions.checkState;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import java.util.ArrayList;
 import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Builder;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
 /**
- * A reusable {@link Builder} for creating {@link ModifyTransactionRequest} message instances. Its internal state is
- * reset when {@link #build()} is invoked, hence it can be used to create a sequence of messages. This class is NOT
- * thread-safe.
- *
- * @author Robert Varga
+ * A reusable builder for creating {@link ModifyTransactionRequest} message instances. Its internal state is reset when
+ * {@link #build()} is invoked, hence it can be used to create a sequence of messages. This class is NOT thread-safe.
  */
-@Beta
-public final class ModifyTransactionRequestBuilder implements Builder<ModifyTransactionRequest>,
-        Identifiable<TransactionIdentifier> {
+public final class ModifyTransactionRequestBuilder implements Identifiable<TransactionIdentifier> {
     private final List<TransactionModification> modifications = new ArrayList<>(1);
-    private final TransactionIdentifier identifier;
+    private final @NonNull TransactionIdentifier identifier;
     private final ActorRef replyTo;
 
     private PersistenceProtocol protocol;
@@ -82,8 +76,7 @@ public final class ModifyTransactionRequestBuilder implements Builder<ModifyTran
         return modifications.size();
     }
 
-    @Override
-    public ModifyTransactionRequest build() {
+    public @NonNull ModifyTransactionRequest build() {
         checkState(haveSequence, "Request sequence has not been set");
 
         final ModifyTransactionRequest ret = new ModifyTransactionRequest(identifier, sequence, replyTo, modifications,
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 384c889..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import com.google.common.collect.ImmutableList;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionRequestProxyV1 extends AbstractTransactionRequestProxy<ModifyTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-
-    private List<TransactionModification> modifications;
-    private Optional<PersistenceProtocol> protocol;
-    private transient NormalizedNodeStreamVersion streamVersion;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ModifyTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ModifyTransactionRequestProxyV1(final ModifyTransactionRequest request) {
-        super(request);
-        this.modifications = requireNonNull(request.getModifications());
-        this.protocol = request.getPersistenceProtocol();
-        this.streamVersion = request.getVersion().getStreamVersion();
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
-
-        final int size = in.readInt();
-        if (size != 0) {
-            modifications = new ArrayList<>(size);
-            final NormalizedNodeDataInput nnin = NormalizedNodeDataInput.newDataInput(in);
-            final ReusableImmutableNormalizedNodeStreamWriter writer =
-                    ReusableImmutableNormalizedNodeStreamWriter.create();
-            for (int i = 0; i < size; ++i) {
-                modifications.add(TransactionModification.readFrom(nnin, writer));
-            }
-        } else {
-            modifications = ImmutableList.of();
-        }
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        out.writeByte(PersistenceProtocol.byteValue(protocol.orElse(null)));
-        out.writeInt(modifications.size());
-        if (!modifications.isEmpty()) {
-            try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
-                for (TransactionModification op : modifications) {
-                    op.writeTo(nnout);
-                }
-            }
-        }
-    }
-
-    @Override
-    protected ModifyTransactionRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
-    }
-}
index c4dd20d6c9a0bf660c648fa11069cae82b0a09fa..38adf787b96c64116414b8cc290ed67ecd38a2c8 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * Response to a {@link ModifyTransactionRequest} which does not have a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ModifyTransactionSuccess extends TransactionSuccess<ModifyTransactionSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ModifyTransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
@@ -29,8 +26,8 @@ public final class ModifyTransactionSuccess extends TransactionSuccess<ModifyTra
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<ModifyTransactionSuccess> externalizableProxy(final ABIVersion version) {
-        return new ModifyTransactionSuccessProxyV1(this);
+    protected MTS externalizableProxy(final ABIVersion version) {
+        return new MTS(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index 0efff09..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ModifyTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ModifyTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ModifyTransactionSuccessProxyV1(final ModifyTransactionSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected ModifyTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ModifyTransactionSuccess(target, sequence);
-    }
-}
index 0864cd0cf0c2fc7495fda9ed9aadf3314dbf780a..c4353c37fd6bf74a74e4186b9968bac5b6972cc7 100644 (file)
@@ -8,17 +8,14 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * General error raised when the recipient of a Request is not the correct backend to talk to. This typically
  * means that the backend processing has moved and the frontend needs to run rediscovery and retry the request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class NotLeaderException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public NotLeaderException(final ActorRef me) {
index cd110d66b6aa708623fcca696a9daaec17872d44..0c908078ebbee07789f35dbeb335179c51837518 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a Request whose sequence does not match the
  * next expected sequence for the target. This is a hard error, as it indicates a Request is missing in the stream.
- *
- * @author Robert Varga
  */
-@Beta
 public final class OutOfOrderRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public OutOfOrderRequestException(final long expectedRequest) {
index ad3dd8d700966a56effac9c225498dec38b1fe57..b39e09a6a45ac570148b824d67da02c7f984f31c 100644 (file)
@@ -7,18 +7,15 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a RequestEnvelope whose sequence does not match
  * the next expected sequence. This can happen during leader transitions, when a part of the stream is rejected because
  * the backend is not the leader and it transitions to being a leader with old stream messages still being present.
- *
- * @author Robert Varga
  */
-@Beta
 public final class OutOfSequenceEnvelopeException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public OutOfSequenceEnvelopeException(final long expectedEnvelope) {
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java
new file mode 100644 (file)
index 0000000..e2b3959
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class PHR implements PurgeLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private PurgeLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PHR() {
+        // for Externalizable
+    }
+
+    PHR(final PurgeLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public PurgeLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final PurgeLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index be58b05b1fc87f3f3cdceb6f9ef6271dd210fbd8..82fca03087c6ac794caa188cd2928c989015af71 100644 (file)
@@ -7,19 +7,16 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.yangtools.concepts.WritableObject;
 
 /**
  * Enumeration of transaction persistence protocols. These govern which protocol is executed between the frontend
  * and backend to drive persistence of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public enum PersistenceProtocol implements WritableObject {
     /**
      * Abort protocol. The transaction has been aborted on the frontend and its effects should not be visible
@@ -77,20 +74,14 @@ public enum PersistenceProtocol implements WritableObject {
         return finish == null ? 0 : finish.byteValue();
     }
 
-    static PersistenceProtocol valueOf(final byte value) {
-        switch (value) {
-            case 0:
-                return null;
-            case 1:
-                return ABORT;
-            case 2:
-                return SIMPLE;
-            case 3:
-                return THREE_PHASE;
-            case 4:
-                return READY;
-            default:
-                throw new IllegalArgumentException("Unhandled byte value " + value);
-        }
+    static @Nullable PersistenceProtocol valueOf(final byte value) {
+        return switch (value) {
+            case 0 -> null;
+            case 1 -> ABORT;
+            case 2 -> SIMPLE;
+            case 3 -> THREE_PHASE;
+            case 4 -> READY;
+            default -> throw new IllegalArgumentException("Unhandled byte value " + value);
+        };
     }
 }
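
The valueOf() rewrite above turns the byte-to-constant decode into a switch expression, with 0 meaning "no protocol" and unknown values rejected. A self-contained sketch of that decode paired with an encode; ProtocolSketch is illustrative and its byteValue() mapping is an assumption of the sketch, not the project's actual implementation:

    // Illustrative enum mirroring the decode shape of PersistenceProtocol.valueOf(byte).
    enum ProtocolSketch {
        ABORT(1), SIMPLE(2), THREE_PHASE(3), READY(4);

        private final byte value;

        ProtocolSketch(final int value) {
            this.value = (byte) value;
        }

        byte byteValue() {
            return value;
        }

        // 0 decodes to "no protocol" (null); anything unknown is rejected outright.
        static ProtocolSketch forByte(final byte value) {
            return switch (value) {
                case 0 -> null;
                case 1 -> ABORT;
                case 2 -> SIMPLE;
                case 3 -> THREE_PHASE;
                case 4 -> READY;
                default -> throw new IllegalArgumentException("Unhandled byte value " + value);
            };
        }
    }
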
index ecbd749dd1f8ada03f0f8a1b83beabc0ba0ef7a8..c9dc5dc1e7c3a8343c99eae7918b76b9e80670e9 100644 (file)
@@ -8,18 +8,24 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to purge a local history. This request is sent by the client once it receives a successful reply to
  * {@link DestroyLocalHistoryRequest} and indicates it has removed all state attached to a particular local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class PurgeLocalHistoryRequest extends LocalHistoryRequest<PurgeLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<PurgeLocalHistoryRequest> {
+        @Override
+        default PurgeLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new PurgeLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public PurgeLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
@@ -31,8 +37,8 @@ public final class PurgeLocalHistoryRequest extends LocalHistoryRequest<PurgeLoc
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> externalizableProxy(final ABIVersion version) {
-        return new PurgeLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new PHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index 11c344f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class PurgeLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public PurgeLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    PurgeLocalHistoryRequestProxyV1(final PurgeLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected PurgeLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new PurgeLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java
new file mode 100644 (file)
index 0000000..e342c18
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTR implements ReadTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ReadTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RTR() {
+        // for Externalizable
+    }
+
+    RTR(final ReadTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ReadTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java
new file mode 100644 (file)
index 0000000..2c80834
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTS implements TransactionSuccess.SerialForm<ReadTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ReadTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RTS() {
+        // for Externalizable
+    }
+
+    RTS(final ReadTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ReadTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        final Optional<NormalizedNode> data;
+        if (in.readBoolean()) {
+            data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
+        } else {
+            data = Optional.empty();
+        }
+        return new ReadTransactionSuccess(target, sequence, data);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out, final ReadTransactionSuccess msg) throws IOException {
+        TransactionSuccess.SerialForm.super.writeExternal(out, msg);
+
+        final var data = msg.getData();
+        if (data.isPresent()) {
+            out.writeBoolean(true);
+            try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                nnout.writeNormalizedNode(data.orElseThrow());
+            }
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
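
RTS above frames the optional read result as a presence boolean, writing the encoded node only when data is present. A minimal sketch of that presence-flag framing with plain data streams and a String payload standing in for the NormalizedNode codec; OptionalFraming is an illustrative name:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Optional;

    final class OptionalFraming {
        static byte[] write(final Optional<String> data) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                if (data.isPresent()) {
                    out.writeBoolean(true);           // presence flag
                    out.writeUTF(data.orElseThrow()); // payload only when present
                } else {
                    out.writeBoolean(false);
                }
            }
            return bytes.toByteArray();
        }

        static Optional<String> read(final byte[] data) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(data))) {
                return in.readBoolean() ? Optional.of(in.readUTF()) : Optional.empty();
            }
        }
    }
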
index 92caa59b7acdeb641f4f655463e20876ba87ec2d..292496b7b43667f217177f4aa253f16206c039a9 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,11 +17,18 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * A transaction request to read a particular path in the current view of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ReadTransactionRequest extends AbstractReadPathTransactionRequest<ReadTransactionRequest> {
+    interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ReadTransactionRequest> {
+        @Override
+        default ReadTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+                throws IOException {
+            return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ReadTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
@@ -38,7 +46,7 @@ public final class ReadTransactionRequest extends AbstractReadPathTransactionReq
     }
 
     @Override
-    protected ReadTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ReadTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new RTR(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index a83b6bc..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionRequestProxyV1 extends AbstractReadPathTransactionRequestProxyV1<ReadTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ReadTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ReadTransactionRequestProxyV1(final ReadTransactionRequest request) {
-        super(request);
-    }
-
-    @Override
-    ReadTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
-        return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-}
index 1b3410ee3c0bdc7c8628aa3ae5f3877664118f60..a03766e9da32d88c7ad3950449ebe3d37b991d6b 100644 (file)
@@ -9,8 +9,10 @@ package org.opendaylight.controller.cluster.access.commands;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
@@ -20,16 +22,19 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 /**
  * Successful reply to a {@link ReadTransactionRequest}. It indicates presence of requested data via
  * {@link #getData()}.
- *
- * @author Robert Varga
  */
-@Beta
-@SuppressFBWarnings("SE_BAD_FIELD")
 public final class ReadTransactionSuccess extends TransactionSuccess<ReadTransactionSuccess>
         implements SliceableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final Optional<NormalizedNode> data;
 
+    private ReadTransactionSuccess(final ReadTransactionSuccess request, final ABIVersion version) {
+        super(request, version);
+        data = request.data;
+    }
+
     public ReadTransactionSuccess(final TransactionIdentifier identifier, final long sequence,
             final Optional<NormalizedNode> data) {
         super(identifier, sequence);
@@ -41,12 +46,27 @@ public final class ReadTransactionSuccess extends TransactionSuccess<ReadTransac
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<ReadTransactionSuccess> externalizableProxy(final ABIVersion version) {
-        return new ReadTransactionSuccessProxyV1(this);
+    protected RTS externalizableProxy(final ABIVersion version) {
+        return new RTS(this);
     }
 
     @Override
     protected ReadTransactionSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ReadTransactionSuccess(this, version);
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index d442f36..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ReadTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    private Optional<NormalizedNode> data;
-    private transient NormalizedNodeStreamVersion streamVersion;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ReadTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ReadTransactionSuccessProxyV1(final ReadTransactionSuccess request) {
-        super(request);
-        this.data = request.getData();
-        this.streamVersion = request.getVersion().getStreamVersion();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        if (data.isPresent()) {
-            out.writeBoolean(true);
-            try (NormalizedNodeDataOutput nnout = streamVersion.newDataOutput(out)) {
-                nnout.writeNormalizedNode(data.get());
-            }
-        } else {
-            out.writeBoolean(false);
-        }
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        if (in.readBoolean()) {
-            data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
-        } else {
-            data = Optional.empty();
-        }
-    }
-
-    @Override
-    protected ReadTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ReadTransactionSuccess(target, sequence, data);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java
new file mode 100644 (file)
index 0000000..aa529ea
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STR implements SkipTransactionsRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SkipTransactionsRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public STR() {
+        // for Externalizable
+    }
+
+    STR(final SkipTransactionsRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final SkipTransactionsRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java
new file mode 100644 (file)
index 0000000..5489709
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STS implements TransactionSuccess.SerialForm<SkipTransactionsResponse> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SkipTransactionsResponse message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public STS() {
+        // for Externalizable
+    }
+
+    STS(final SkipTransactionsResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsResponse message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final SkipTransactionsResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new SkipTransactionsResponse(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index dd5faa8e875b474f82c57597411be7039c43b93c..a2c037f784753ca19babfad0e6416888c3f8248c 100644 (file)
@@ -8,15 +8,18 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.collect.ImmutableList;
 import com.google.common.primitives.UnsignedLong;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import java.util.Collection;
 import java.util.List;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * Request to skip a number of {@link TransactionIdentifier}s within a {@code local history}. This request is essentially
@@ -27,8 +30,51 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * This request is sent by the frontend to inform the backend that a set of {@link TransactionIdentifier}s are
  * explicitly retired and are guaranteed to never be used by the frontend.
  */
-@Beta
 public final class SkipTransactionsRequest extends TransactionRequest<SkipTransactionsRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<SkipTransactionsRequest> {
+        @Override
+        default SkipTransactionsRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+            final int size = in.readInt();
+            final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);
+            int idx;
+            if (size % 2 != 0) {
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
+                idx = 1;
+            } else {
+                idx = 0;
+            }
+            for (; idx < size; idx += 2) {
+                final byte hdr = WritableObjects.readLongHeader(in);
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
+            }
+
+            return new SkipTransactionsRequest(target, sequence, replyTo, builder.build());
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final SkipTransactionsRequest msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+            final var others = msg.others;
+            final int size = others.size();
+            out.writeInt(size);
+
+            int idx;
+            if (size % 2 != 0) {
+                WritableObjects.writeLong(out, others.get(0).longValue());
+                idx = 1;
+            } else {
+                idx = 0;
+            }
+            for (; idx < size; idx += 2) {
+                WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
+            }
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     // Note: UnsignedLong is arbitrary, yang.common.Uint64 would work just as well, we really want an immutable
@@ -41,6 +87,11 @@ public final class SkipTransactionsRequest extends TransactionRequest<SkipTransa
         this.others = ImmutableList.copyOf(others);
     }
 
+    private SkipTransactionsRequest(final SkipTransactionsRequest request, final ABIVersion version) {
+        super(request, version);
+        others = request.others;
+    }
+
     /**
      * Return this {@link #getTarget()}s sibling {@link TransactionIdentifier}s.
      *
@@ -51,13 +102,13 @@ public final class SkipTransactionsRequest extends TransactionRequest<SkipTransa
     }
 
     @Override
-    protected SkipTransactionsRequestV1 externalizableProxy(final ABIVersion version) {
-        return new SkipTransactionsRequestV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new STR(this);
     }
 
     @Override
     protected SkipTransactionsRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new SkipTransactionsRequest(this, version);
     }
 
     @Override
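
The SerialForm above folds SkipTransactionsRequestV1's readExternal()/writeExternal() bodies into interface default methods without changing the encoding: the writer emits the list size, then a lone leading element via WritableObjects.writeLong() when the size is odd, then the remaining elements in pairs via WritableObjects.writeLongs() so each pair shares a single header byte; the reader mirrors that exactly. A small round-trip sketch of the scheme over plain data streams (the class name, stream setup and sample values are illustrative only):

    import com.google.common.collect.ImmutableList;
    import com.google.common.primitives.UnsignedLong;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.List;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    public final class SkipEncodingDemo {
        public static void main(final String[] args) throws IOException {
            final List<UnsignedLong> others = ImmutableList.of(
                UnsignedLong.valueOf(1), UnsignedLong.valueOf(2), UnsignedLong.valueOf(3));

            // Write: size, optional lone leading element, then pairs sharing a header byte.
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                final int size = others.size();
                out.writeInt(size);
                int idx;
                if (size % 2 != 0) {
                    WritableObjects.writeLong(out, others.get(0).longValue());
                    idx = 1;
                } else {
                    idx = 0;
                }
                for (; idx < size; idx += 2) {
                    WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
                }
            }

            // Read: mirror the writer exactly.
            try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                final int size = in.readInt();
                final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);
                int idx;
                if (size % 2 != 0) {
                    builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
                    idx = 1;
                } else {
                    idx = 0;
                }
                for (; idx < size; idx += 2) {
                    final byte hdr = WritableObjects.readLongHeader(in);
                    builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
                    builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
                }
                System.out.println(builder.build()); // prints [1, 2, 3]
            }
        }
    }
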
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestV1.java
deleted file mode 100644 (file)
index c7c383c..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import com.google.common.collect.ImmutableList;
-import com.google.common.primitives.UnsignedLong;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-/**
- * Externalizable proxy for use with {@link SkipTransactionsRequest}. It implements the initial
- * (Phosphorus SR1) serialization format.
- */
-final class SkipTransactionsRequestV1 extends AbstractTransactionRequestProxy<SkipTransactionsRequest> {
-    private List<UnsignedLong> others;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public SkipTransactionsRequestV1() {
-        // For Externalizable
-    }
-
-    SkipTransactionsRequestV1(final SkipTransactionsRequest request) {
-        super(request);
-        others = request.getOthers();
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        final int size = in.readInt();
-        final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);
-        int idx;
-        if (size % 2 != 0) {
-            builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
-            idx = 1;
-        } else {
-            idx = 0;
-        }
-        for (; idx < size; idx += 2) {
-            final byte hdr = WritableObjects.readLongHeader(in);
-            builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
-            builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
-        }
-        others = builder.build();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        final int size = others.size();
-        out.writeInt(size);
-
-        int idx;
-        if (size % 2 != 0) {
-            WritableObjects.writeLong(out, others.get(0).longValue());
-            idx = 1;
-        } else {
-            idx = 0;
-        }
-        for (; idx < size; idx += 2) {
-            WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
-        }
-    }
-
-    @Override
-    protected SkipTransactionsRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyToActor) {
-        return new SkipTransactionsRequest(target, sequence, replyToActor, others);
-    }
-}
index b62af7e7dc491ac14591ebac21a630fddabfd779..9f3d54d9a61a990052c7f66b01b957df6dd32055 100644 (file)
@@ -7,29 +7,32 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * Successful reply to a {@link SkipTransactionsRequest}.
  */
-@Beta
+// FIXME: rename to SkipTransactionsSuccess
 public final class SkipTransactionsResponse extends TransactionSuccess<SkipTransactionsResponse> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private SkipTransactionsResponse(final SkipTransactionsResponse success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public SkipTransactionsResponse(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<SkipTransactionsResponse> externalizableProxy(
-            final ABIVersion version) {
-        return new SkipTransactionsResponseProxyV1(this);
+    protected STS externalizableProxy(final ABIVersion version) {
+        return new STS(this);
     }
 
     @Override
     protected SkipTransactionsResponse cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new SkipTransactionsResponse(this, version);
     }
 }
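
A second pattern repeated through these files: cloneAsVersion() used to return this, which discarded the requested ABIVersion; each message now gains a private copy constructor taking an ABIVersion, so the re-versioned copy records the version it will be serialized towards. A compact sketch of that shape (every name below is a hypothetical stand-in, not the real Message/Request hierarchy):

    public final class CloneAsVersionDemo {
        // Hypothetical stand-in for ABIVersion.
        enum DemoVersion { CHLORINE_SR2, POTASSIUM }

        abstract static class DemoMessage<T extends DemoMessage<T>> {
            final String target;
            final DemoVersion version;

            DemoMessage(final String target, final DemoVersion version) {
                this.target = target;
                this.version = version;
            }

            DemoMessage(final DemoMessage<T> other, final DemoVersion version) {
                target = other.target;
                this.version = version;
            }

            // Subclasses return a copy carrying the requested version.
            abstract T cloneAsVersion(DemoVersion version);
        }

        static final class DemoAbortRequest extends DemoMessage<DemoAbortRequest> {
            DemoAbortRequest(final String target) {
                super(target, DemoVersion.CHLORINE_SR2);
            }

            // Mirrors the private (request, version) constructors added in this change.
            private DemoAbortRequest(final DemoAbortRequest request, final DemoVersion version) {
                super(request, version);
            }

            @Override
            DemoAbortRequest cloneAsVersion(final DemoVersion version) {
                // Returning 'this' here would silently keep the original version.
                return new DemoAbortRequest(this, version);
            }
        }

        public static void main(final String[] args) {
            final var original = new DemoAbortRequest("member-1-txn-0");
            final var reversioned = original.cloneAsVersion(DemoVersion.POTASSIUM);
            System.out.println(original.version + " -> " + reversioned.version); // CHLORINE_SR2 -> POTASSIUM
        }
    }
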
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseProxyV1.java
deleted file mode 100644 (file)
index 9bc93f9..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the initial (Phosphorus SR1)
- * serialization format.
- */
-final class SkipTransactionsResponseProxyV1 extends AbstractTransactionSuccessProxy<SkipTransactionsResponse> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public SkipTransactionsResponseProxyV1() {
-        // For Externalizable
-    }
-
-    SkipTransactionsResponseProxyV1(final SkipTransactionsResponse success) {
-        super(success);
-    }
-
-    @Override
-    protected SkipTransactionsResponse createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new SkipTransactionsResponse(target, sequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java
new file mode 100644 (file)
index 0000000..98f63b7
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAR implements TransactionAbortRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionAbortRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TAR() {
+        // for Externalizable
+    }
+
+    TAR(final TransactionAbortRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionAbortRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java
new file mode 100644 (file)
index 0000000..daeee07
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAS implements TransactionSuccess.SerialForm<TransactionAbortSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionAbortSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TAS() {
+        // for Externalizable
+    }
+
+    TAS(final TransactionAbortSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionAbortSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionAbortSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java
new file mode 100644 (file)
index 0000000..22a8a84
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCCS implements TransactionSuccess.SerialForm<TransactionCanCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionCanCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TCCS() {
+        // for Externalizable
+    }
+
+    TCCS(final TransactionCanCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCanCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionCanCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCanCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionCanCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java
new file mode 100644 (file)
index 0000000..7f897d8
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCS implements TransactionSuccess.SerialForm<TransactionCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TCS() {
+        // for Externalizable
+    }
+
+    TCS(final TransactionCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java
new file mode 100644 (file)
index 0000000..01c2733
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TDCR implements TransactionDoCommitRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionDoCommitRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TDCR() {
+        // for Externalizable
+    }
+
+    TDCR(final TransactionDoCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionDoCommitRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionDoCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java
new file mode 100644 (file)
index 0000000..6e26fc3
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class TF implements TransactionFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TF() {
+        // for Externalizable
+    }
+
+    TF(final TransactionFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java
new file mode 100644 (file)
index 0000000..0bf4ae5
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCR implements TransactionPreCommitRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPreCommitRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPCR() {
+        // for Externalizable
+    }
+
+    TPCR(final TransactionPreCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPreCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java
new file mode 100644 (file)
index 0000000..a64efa1
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCS implements TransactionSuccess.SerialForm<TransactionPreCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPreCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPCS() {
+        // for Externalizable
+    }
+
+    TPCS(final TransactionPreCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPreCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new TransactionPreCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java
new file mode 100644 (file)
index 0000000..a80e1f6
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPR implements TransactionPurgeRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPurgeRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPR() {
+        // for Externalizable
+    }
+
+    TPR(final TransactionPurgeRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPurgeRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java
new file mode 100644 (file)
index 0000000..1b2f94a
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPS implements TransactionSuccess.SerialForm<TransactionPurgeResponse> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPurgeResponse message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPS() {
+        // for Externalizable
+    }
+
+    TPS(final TransactionPurgeResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeResponse message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPurgeResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionPurgeResponse(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index b8499cc2a25dacaccd126c568d37ef2aedfbfcee..c9238ab9af80bcac87185aab1082e33bd8078290 100644 (file)
@@ -8,30 +8,40 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the abort step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionAbortRequest extends TransactionRequest<TransactionAbortRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionAbortRequest> {
+        @Override
+        default TransactionAbortRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionAbortRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionAbortRequest(final TransactionAbortRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionAbortRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionAbortRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionAbortRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TAR(this);
     }
 
     @Override
     protected TransactionAbortRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionAbortRequest(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java
deleted file mode 100644 (file)
index 3e67dfe..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionAbortRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionAbortRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionAbortRequestProxyV1(final TransactionAbortRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionAbortRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionAbortRequest(target, sequence, replyTo);
-    }
-}
index 69c6dddd8f9c49f84801cbb0f938c71b256ec8d3..db92890b1b94df332a36dcd70e1a44d1ded39b16 100644 (file)
@@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionAbortSuccess extends TransactionSuccess<TransactionAbortSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionAbortSuccess(final TransactionAbortSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionAbortSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionAbortSuccess> externalizableProxy(final ABIVersion version) {
-        return new TransactionAbortSuccessProxyV1(this);
+    protected TAS externalizableProxy(final ABIVersion version) {
+        return new TAS(this);
     }
 
     @Override
     protected TransactionAbortSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionAbortSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java
deleted file mode 100644 (file)
index c9de3b9..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionAbortSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionAbortSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionAbortSuccessProxyV1(final TransactionAbortSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionAbortSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionAbortSuccess(target, sequence);
-    }
-}
index e6149bd5da226529e746a07e09314fde37759346..55c5cdb2d61641d3cc2b3224c04a88feccde7708 100644 (file)
@@ -17,20 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionCanCommitSuccess extends TransactionSuccess<TransactionCanCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionCanCommitSuccess(final TransactionCanCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionCanCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionCanCommitSuccessProxyV1(this);
+    protected TCCS externalizableProxy(final ABIVersion version) {
+        return new TCCS(this);
     }
 
     @Override
     protected TransactionCanCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionCanCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index b41ec29..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCanCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionCanCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionCanCommitSuccessProxyV1(final TransactionCanCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-    }
-
-    @Override
-    protected TransactionCanCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionCanCommitSuccess(target, sequence);
-    }
-}
index 6b28244484e3eb739881696429e9119db14dc47e..1fc06da9c988685027ffb7ae18515e805f4a2b6d 100644 (file)
@@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionCommitSuccess extends TransactionSuccess<TransactionCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionCommitSuccess(final TransactionCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionCommitSuccess> externalizableProxy(final ABIVersion version) {
-        return new TransactionCommitSuccessProxyV1(this);
+    protected TCS externalizableProxy(final ABIVersion version) {
+        return new TCS(this);
     }
 
     @Override
     protected TransactionCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index f23da7b..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionCommitSuccessProxyV1(final TransactionCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionCommitSuccess(target, sequence);
-    }
-}
index 9f0cb81876254d93c38f604002b56f504bdb6806..d6eb297bb043b55d4764b5a30e81eb68ac280609 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.commands;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import java.io.IOException;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -17,10 +16,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutpu
 
 /**
  * A {@link TransactionModification} which has a data component.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class TransactionDataModification extends TransactionModification {
     private final NormalizedNode data;
 
index 62acdbbb76ba627a2fd970b0cdeb72214a72d2db..00c9bd8259fb3eaf22467becd632f990d1883f88 100644 (file)
@@ -7,15 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * Delete a particular path.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionDelete extends TransactionModification {
     public TransactionDelete(final YangInstanceIdentifier path) {
         super(path);
index 955c2680086db7553969857e0656238e37c179f8..ad7ffad146a0f5a05215cd21a3af0b9655acd1f8 100644 (file)
@@ -8,30 +8,40 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the final, doCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionDoCommitRequest extends TransactionRequest<TransactionDoCommitRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionDoCommitRequest> {
+        @Override
+        default TransactionDoCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionDoCommitRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionDoCommitRequest(final TransactionDoCommitRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionDoCommitRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionDoCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionDoCommitRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TDCR(this);
     }
 
     @Override
     protected TransactionDoCommitRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionDoCommitRequest(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java
deleted file mode 100644 (file)
index fcb63fd..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionDoCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionDoCommitRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionDoCommitRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionDoCommitRequestProxyV1(final TransactionDoCommitRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionDoCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionDoCommitRequest(target, sequence, replyTo);
-    }
-}
index e0b6a5998795c271453d761edb158700a8e9a0b6..288a90ee3c3d89e8e75c1db17b4285d6f2e7655f 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
@@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 /**
  * Generic {@link RequestFailure} involving a {@link TransactionRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionFailure extends RequestFailure<TransactionIdentifier, TransactionFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+
+        @Override
+        default TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new TransactionFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionFailure(final TransactionFailure failure, final ABIVersion version) {
+        super(failure, version);
+    }
+
     TransactionFailure(final TransactionIdentifier target, final long sequence, final RequestException cause) {
         super(target, sequence, cause);
     }
 
     @Override
     protected TransactionFailure cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionFailure(this, version);
     }
 
     @Override
-    protected TransactionFailureProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TF(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java
deleted file mode 100644 (file)
index d3b1dd7..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionFailureProxyV1 extends AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionFailureProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionFailureProxyV1(final TransactionFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new TransactionFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
index bf2580f44b5437ec2478989706b4f6f0e79423ad..2784687d288e19f1e4a74ddc3e01a0c5f907a954 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
  * Merge a {@link NormalizedNode} tree onto a specific path.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionMerge extends TransactionDataModification {
     public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path, data);
index a4e019437393731b8c7794f9cf3220c6e0b18168..96bea87d46764c808d5a4cf50fd2dced686f66f6 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.commands;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import java.io.IOException;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -23,10 +22,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutpu
  * {@link #readFrom(NormalizedNodeDataInput, ReusableStreamReceiver)} methods for explicit serialization. The reason for
  * this is that they are usually transmitted in bulk, hence it is advantageous to reuse
  * a {@link NormalizedNodeDataOutput} instance to achieve better compression.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class TransactionModification {
     static final byte TYPE_DELETE = 1;
     static final byte TYPE_MERGE = 2;
@@ -57,15 +53,11 @@ public abstract class TransactionModification {
     static TransactionModification readFrom(final NormalizedNodeDataInput in, final ReusableStreamReceiver writer)
             throws IOException {
         final byte type = in.readByte();
-        switch (type) {
-            case TYPE_DELETE:
-                return new TransactionDelete(in.readYangInstanceIdentifier());
-            case TYPE_MERGE:
-                return new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
-            case TYPE_WRITE:
-                return new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
-            default:
-                throw new IllegalArgumentException("Unhandled type " + type);
-        }
+        return switch (type) {
+            case TYPE_DELETE -> new TransactionDelete(in.readYangInstanceIdentifier());
+            case TYPE_MERGE -> new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+            case TYPE_WRITE -> new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+            default -> throw new IllegalArgumentException("Unhandled type " + type);
+        };
     }
 }
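
readFrom() above is restructured into a switch expression; the wire layout is untouched: a type byte selects the modification kind, the path follows, and merge/write additionally carry a data node. A simplified analogue of that tag dispatch over plain data streams; the constant values, the record and the String stand-ins for YangInstanceIdentifier/NormalizedNode are illustrative only:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class ModificationFramingDemo {
        static final byte TYPE_DELETE = 1;
        static final byte TYPE_MERGE = 2;
        static final byte TYPE_WRITE = 3;

        // Simplified stand-in for TransactionModification: a type tag, a path and optional data.
        record DemoModification(byte type, String path, String data) { }

        static DemoModification readFrom(final DataInput in) throws IOException {
            final byte type = in.readByte();
            return switch (type) {
                case TYPE_DELETE -> new DemoModification(type, in.readUTF(), null);
                case TYPE_MERGE, TYPE_WRITE -> new DemoModification(type, in.readUTF(), in.readUTF());
                default -> throw new IllegalArgumentException("Unhandled type " + type);
            };
        }

        public static void main(final String[] args) throws IOException {
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                out.writeByte(TYPE_WRITE);
                out.writeUTF("/nodes/node[1]");
                out.writeUTF("{ \"value\": 42 }");
            }
            try (var in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                System.out.println(readFrom(in));
            }
        }
    }
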
index 226557d12436fca9b8cce808f2b82907559741b8..3172842f7667551502f8248ca30374081895a562 100644 (file)
@@ -8,31 +8,41 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the second, preCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionPreCommitRequest extends TransactionRequest<TransactionPreCommitRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionPreCommitRequest> {
+        @Override
+        default TransactionPreCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionPreCommitRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPreCommitRequest(final TransactionPreCommitRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionPreCommitRequest(final TransactionIdentifier target, final long sequence,
             final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionPreCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionPreCommitRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TPCR(this);
     }
 
     @Override
     protected TransactionPreCommitRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPreCommitRequest(this, version);
     }
 }
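
Here the dedicated TransactionPreCommitRequestProxyV1 class (deleted below) gives way to a nested SerialForm interface whose default readExternal() rebuilds the request, and cloneAsVersion() now returns a fresh copy via the new (request, version) constructor instead of returning this. A condensed sketch of that interface-with-default-method shape; MessageSerialForm and PreCommitMessage are hypothetical mirrors, not the real classes.

    import java.io.IOException;
    import java.io.ObjectInput;

    // The shared interface carries the stream handling as default methods; each
    // concrete message contributes only its constructor call.
    interface MessageSerialForm<T> {
        T readExternal(ObjectInput in, String target, long sequence) throws IOException;
    }

    final class PreCommitMessage {
        private final String target;
        private final long sequence;

        PreCommitMessage(final String target, final long sequence) {
            this.target = target;
            this.sequence = sequence;
        }

        @Override
        public String toString() {
            return "PreCommitMessage{target=" + target + ", sequence=" + sequence + "}";
        }

        // Replaces what used to be a standalone *ProxyV1 class: no extra state, just instantiation.
        interface SerialForm extends MessageSerialForm<PreCommitMessage> {
            @Override
            default PreCommitMessage readExternal(final ObjectInput in, final String target, final long sequence) {
                return new PreCommitMessage(target, sequence);
            }
        }
    }
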
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java
deleted file mode 100644 (file)
index bf044e1..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPreCommitRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPreCommitRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPreCommitRequestProxyV1(final TransactionPreCommitRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionPreCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionPreCommitRequest(target, sequence, replyTo);
-    }
-}
index 716f37804cc0b88f7fd0efdba345fc8de381e086..695d2615120a3612eb3161575828bd053c1b941f 100644 (file)
@@ -16,20 +16,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionPreCommitSuccess extends TransactionSuccess<TransactionPreCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPreCommitSuccess(final TransactionPreCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionPreCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionPreCommitSuccessProxyV1(this);
+    protected TPCS externalizableProxy(final ABIVersion version) {
+        return new TPCS(this);
     }
 
     @Override
     protected TransactionPreCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPreCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index 17b1b8d..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPreCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPreCommitSuccessProxyV1(final TransactionPreCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionPreCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionPreCommitSuccess(target, sequence);
-    }
-}
index a0fab70188f1a29f0f8e019da85e8ac0a61d1d51..757c8134a24d2dda1c8cfccc07f7b6cc119290b1 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -16,24 +16,34 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * A transaction request to perform the final transaction transition, which is purging it from the protocol view,
  * meaning the frontend has no further knowledge of the transaction. The backend is free to purge any state related
  * to the transaction and responds with a {@link TransactionPurgeResponse}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionPurgeRequest extends TransactionRequest<TransactionPurgeRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionPurgeRequest> {
+        @Override
+        default TransactionPurgeRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionPurgeRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPurgeRequest(final TransactionPurgeRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionPurgeRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionPurgeRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionPurgeRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TPR(this);
     }
 
     @Override
     protected TransactionPurgeRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPurgeRequest(this, version);
     }
 }
\ No newline at end of file
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java
deleted file mode 100644 (file)
index ee56b4c..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPurgeRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPurgeRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPurgeRequestProxyV1(final TransactionPurgeRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionPurgeRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionPurgeRequest(target, sequence, replyTo);
-    }
-}
index 54710143bcdd8633fa030c9f1b0040abffcb3fdd..558e414d12991d5f31379193668d86de3f2d1da8 100644 (file)
@@ -12,24 +12,27 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 /**
  * Successful reply to a {@link TransactionPurgeRequest}.
- *
- * @author Robert Varga
  */
+// FIXME: rename to TransactionPurgeSuccess
 public final class TransactionPurgeResponse extends TransactionSuccess<TransactionPurgeResponse> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPurgeResponse(final TransactionPurgeResponse success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionPurgeResponse(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionPurgeResponse> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionPurgeResponseProxyV1(this);
+    protected TPS externalizableProxy(final ABIVersion version) {
+        return new TPS(this);
     }
 
     @Override
     protected TransactionPurgeResponse cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPurgeResponse(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java
deleted file mode 100644 (file)
index d15d729..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeResponseProxyV1 extends AbstractTransactionSuccessProxy<TransactionPurgeResponse> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPurgeResponseProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPurgeResponseProxyV1(final TransactionPurgeResponse success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionPurgeResponse createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionPurgeResponse(target, sequence);
-    }
-}
index 4dcf6ea93c6451d5b0b96c3e3d24a29b9bbfe82f..15d98f91507990ddbcd499d28934a4330f93fb9d 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -18,12 +19,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Abstract base class for {@link Request}s involving specific transaction. This class is visible outside of this
  * package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class TransactionRequest<T extends TransactionRequest<T>> extends Request<TransactionIdentifier, T> {
+    protected interface SerialForm<T extends TransactionRequest<T>>
+            extends Request.SerialForm<TransactionIdentifier, T> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     TransactionRequest(final TransactionIdentifier identifier, final long sequence, final ActorRef replyTo) {
@@ -40,5 +47,5 @@ public abstract class TransactionRequest<T extends TransactionRequest<T>> extend
     }
 
     @Override
-    protected abstract AbstractTransactionRequestProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
index 636a2e741bd42f8fcd5471c92a794a21ae3b2528..689b4d5ee1443faadc57d93fe1f3d3f8ad8585fc 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,13 +17,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Abstract base class for {@link RequestSuccess}es involving specific transaction. This class is visible outside of
  * this package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class TransactionSuccess<T extends TransactionSuccess<T>>
         extends RequestSuccess<TransactionIdentifier, T> {
+    interface SerialForm<T extends TransactionSuccess<T>> extends RequestSuccess.SerialForm<TransactionIdentifier, T> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     TransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
@@ -34,5 +40,5 @@ public abstract class TransactionSuccess<T extends TransactionSuccess<T>>
     }
 
     @Override
-    protected abstract AbstractTransactionSuccessProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
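
Both TransactionRequest and TransactionSuccess note that they are visible outside their package solely to permit a unified instanceof check. A hedged caller-side sketch of such a check; the MessageClassifier helper is hypothetical, and getTarget() is assumed to be the public accessor the serial forms above read from.

    import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
    import org.opendaylight.controller.cluster.access.commands.TransactionSuccess;

    final class MessageClassifier {
        // One instanceof per family covers every concrete request/success subclass.
        static String classify(final Object message) {
            if (message instanceof TransactionRequest<?> request) {
                return "transaction request for " + request.getTarget();
            }
            if (message instanceof TransactionSuccess<?> success) {
                return "transaction success for " + success.getTarget();
            }
            return "unrelated message";
        }
    }
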
index 4960c4ad989ed53ecffd23e8ec06fa3b90b3b2de..af1acbe57ed25ea2dfbe36067bf14aad5145f7b9 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
  * Modification to write (and replace) a subtree at specified path with another subtree.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionWrite extends TransactionDataModification {
     public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path, data);
index 196c60c0d82951068c46a9d3edc225a6ac622002..c688df3c90f5ede0d33791a7686706bdccb5efb2 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a request referencing an unknown history. This
  * typically happens when the linear history ID is newer than the highest observed {@link CreateLocalHistoryRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class UnknownHistoryException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public UnknownHistoryException(final Long lastSeenHistory) {
@@ -25,7 +22,7 @@ public final class UnknownHistoryException extends RequestException {
     }
 
     private static String historyToString(final Long history) {
-        return history == null ? "null" : Long.toUnsignedString(history.longValue());
+        return history == null ? "null" : Long.toUnsignedString(history);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java
deleted file mode 100644 (file)
index 71a731a..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractEnvelopeProxy<T extends Message<?, ?>> implements Externalizable {
-    private static final long serialVersionUID = 1L;
-
-    private T message;
-    private long sessionId;
-    private long txSequence;
-
-    AbstractEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    AbstractEnvelopeProxy(final Envelope<T> envelope) {
-        message = envelope.getMessage();
-        txSequence = envelope.getTxSequence();
-        sessionId = envelope.getSessionId();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        WritableObjects.writeLongs(out, sessionId, txSequence);
-        out.writeObject(message);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        final byte header = WritableObjects.readLongHeader(in);
-        sessionId = WritableObjects.readFirstLong(in, header);
-        txSequence = WritableObjects.readSecondLong(in, header);
-        message = (T) in.readObject();
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract Envelope<T> createEnvelope(T wrappedNessage, long sessionId, long txSequence);
-
-    final Object readResolve() {
-        return createEnvelope(message, sessionId, txSequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java
deleted file mode 100644 (file)
index 0367527..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import static com.google.common.base.Verify.verifyNotNull;
-
-import java.io.DataInput;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-/**
- * Abstract Externalizable proxy for use with {@link Message} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractMessageProxy<T extends WritableIdentifier, C extends Message<T, C>> implements Externalizable {
-    private static final long serialVersionUID = 1L;
-    private T target;
-    private long sequence;
-
-    protected AbstractMessageProxy() {
-        // For Externalizable
-    }
-
-    AbstractMessageProxy(final @NonNull C message) {
-        this.target = message.getTarget();
-        this.sequence = message.getSequence();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        target.writeTo(out);
-        WritableObjects.writeLong(out, sequence);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        target = verifyNotNull(readTarget(in));
-        sequence = WritableObjects.readLong(in);
-    }
-
-    protected final Object readResolve() {
-        return verifyNotNull(createMessage(target, sequence));
-    }
-
-    protected abstract @NonNull T readTarget(@NonNull DataInput in) throws IOException;
-
-    abstract @NonNull C createMessage(@NonNull T msgTarget, long msgSequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java
deleted file mode 100644 (file)
index e35936d..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestFailure} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestFailureProxy<T extends WritableIdentifier, C extends RequestFailure<T, C>>
-        extends AbstractResponseProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-    private RequestException cause;
-
-    protected AbstractRequestFailureProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractRequestFailureProxy(final @NonNull C failure) {
-        super(failure);
-        this.cause = failure.getCause();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeObject(cause);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        cause = (RequestException) in.readObject();
-    }
-
-    @Override
-    final C createResponse(final T target, final long sequence) {
-        return createFailure(target, sequence, cause);
-    }
-
-    protected abstract @NonNull C createFailure(@NonNull T target, long sequence,
-            @NonNull RequestException failureCause);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java
deleted file mode 100644 (file)
index 183766f..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import akka.actor.ActorRef;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link Request} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestProxy<T extends WritableIdentifier, C extends Request<T, C>>
-        extends AbstractMessageProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-    private ActorRef replyTo;
-
-    protected AbstractRequestProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractRequestProxy(final @NonNull C request) {
-        super(request);
-        this.replyTo = request.getReplyTo();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeObject(Serialization.serializedActorPath(replyTo));
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        replyTo = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
-    }
-
-    @Override
-    final C createMessage(final T target, final long sequence) {
-        return createRequest(target, sequence, replyTo);
-    }
-
-    protected abstract @NonNull C createRequest(@NonNull T target, long sequence, @NonNull ActorRef replyToActor);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java
deleted file mode 100644 (file)
index 1e873b4..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractResponseEnvelopeProxy<T extends Response<?, ?>> extends AbstractEnvelopeProxy<T> {
-    private static final long serialVersionUID = 1L;
-
-    private long executionTimeNanos;
-
-    AbstractResponseEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    AbstractResponseEnvelopeProxy(final ResponseEnvelope<T> envelope) {
-        super(envelope);
-        this.executionTimeNanos = envelope.getExecutionTimeNanos();
-    }
-
-    @Override
-    public final void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        WritableObjects.writeLong(out, executionTimeNanos);
-    }
-
-    @Override
-    public final void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        executionTimeNanos = WritableObjects.readLong(in);
-    }
-
-    @Override
-    final ResponseEnvelope<T> createEnvelope(final T message, final long sessionId, final long txSequence) {
-        return createEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract ResponseEnvelope<T> createEnvelope(T message, long sessionId, long txSequence, long executionTimeNanos);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java
deleted file mode 100644 (file)
index c9edfdb..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy class to use with {@link Response} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractResponseProxy<T extends WritableIdentifier, C extends Response<T, C>>
-        extends AbstractMessageProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractResponseProxy() {
-        // for Externalizable
-    }
-
-    AbstractResponseProxy(final @NonNull C response) {
-        super(response);
-    }
-
-    @Override
-    final C createMessage(final T target, final long sequence) {
-        return createResponse(target, sequence);
-    }
-
-    abstract @NonNull C createResponse(@NonNull T target, long sequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java
deleted file mode 100644 (file)
index ecf792e..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestSuccess} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractSuccessProxy<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
-        extends AbstractResponseProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractSuccessProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractSuccessProxy(final @NonNull C success) {
-        super(success);
-    }
-
-    @Override
-    final C createResponse(final T target, final long sequence) {
-        return createSuccess(target, sequence);
-    }
-
-    protected abstract @NonNull C createSuccess(@NonNull T target, long sequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java
new file mode 100644 (file)
index 0000000..e88764d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ClientIdentifier}.
+ */
+final class CI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ClientIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CI() {
+        // for Externalizable
+    }
+
+    CI(final ClientIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new ClientIdentifier(FrontendIdentifier.readFrom(in), WritableObjects.readLong(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getFrontendId().writeTo(out);
+        WritableObjects.writeLong(out, identifier.getGeneration());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
index c317ac31b6985710e4e25477656880033d47a2b6..42701539a6170a0f48942fa9e635ba29db0d464a 100644 (file)
@@ -9,14 +9,10 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024.ClientGeneration;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -26,45 +22,9 @@ import org.opendaylight.yangtools.yang.common.Uint64;
 /**
  * A cluster-wide unique identifier of a frontend instance. This identifier discerns between individual incarnations
  * of a particular frontend.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ClientIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private FrontendIdentifier frontendId;
-        private long generation;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // Needed for Externalizable
-        }
-
-        Proxy(final FrontendIdentifier frontendId, final long generation) {
-            this.frontendId = requireNonNull(frontendId);
-            this.generation = generation;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            frontendId.writeTo(out);
-            WritableObjects.writeLong(out, generation);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            frontendId = FrontendIdentifier.readFrom(in);
-            generation = WritableObjects.readLong(in);
-        }
-
-        private Object readResolve() {
-            return new ClientIdentifier(frontendId, generation);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull FrontendIdentifier frontendId;
@@ -110,24 +70,20 @@ public final class ClientIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof ClientIdentifier)) {
-            return false;
-        }
-
-        final ClientIdentifier other = (ClientIdentifier) obj;
-        return generation == other.generation && frontendId.equals(other.frontendId);
+        return this == obj || obj instanceof ClientIdentifier other && generation == other.generation
+            && frontendId.equals(other.frontendId);
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(ClientIdentifier.class).add("frontend", frontendId)
-                .add("generation", Long.toUnsignedString(generation)).toString();
+        return MoreObjects.toStringHelper(ClientIdentifier.class)
+            .add("frontend", frontendId)
+            .add("generation", Long.toUnsignedString(generation))
+            .toString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(frontendId, generation);
+        return new CI(this);
     }
 }
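
The two hunks above move ClientIdentifier's serialization from a nested Proxy class to the top-level CI form: writeReplace() emits the compact proxy and the proxy's readResolve() hands back the rebuilt identifier. A minimal self-contained sketch of that serialization-proxy round-trip; Generation and GenerationProxy are hypothetical names, not the ODL types.

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    // Hypothetical value type using the serialization-proxy pattern.
    final class Generation implements Serializable {
        private final long value;

        Generation(final long value) {
            this.value = value;
        }

        long value() {
            return value;
        }

        // Java serialization writes the proxy instead of this instance.
        private Object writeReplace() {
            return new GenerationProxy(this);
        }
    }

    final class GenerationProxy implements Externalizable {
        private Generation generation;

        public GenerationProxy() {
            // for Externalizable
        }

        GenerationProxy(final Generation generation) {
            this.generation = generation;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeLong(generation.value());
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            generation = new Generation(in.readLong());
        }

        // Deserialization resolves back to the real object, never exposing the proxy.
        private Object readResolve() {
            return generation;
        }
    }
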
index c6123d6fc35de45a4946dba21ac68f6a6b92dadc..eed7b5374113a4bf4b91d53fb576281d667259e6 100644 (file)
@@ -10,11 +10,49 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import java.io.Serializable;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Serializable {
+    interface SerialForm<T extends Message<?, ?>, E extends Envelope<T>> extends Externalizable {
+
+        @NonNull E envelope();
+
+        void setEnvelope(@NonNull E envelope);
+
+        @java.io.Serial
+        Object readResolve();
+
+        @Override
+        default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final byte header = WritableObjects.readLongHeader(in);
+            final var sessionId = WritableObjects.readFirstLong(in, header);
+            final var txSequence = WritableObjects.readSecondLong(in, header);
+            @SuppressWarnings("unchecked")
+            final var message = (T) in.readObject();
+            setEnvelope(readExternal(in, sessionId, txSequence, message));
+        }
+
+        E readExternal(ObjectInput in, long sessionId, long txSequence, T message) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            writeExternal(out, envelope());
+        }
+
+        default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+            WritableObjects.writeLongs(out, envelope.getSessionId(), envelope.getTxSequence());
+            out.writeObject(envelope.getMessage());
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull T message;
@@ -60,9 +98,10 @@ public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Se
                 .add("txSequence", Long.toHexString(txSequence)).add("message", message).toString();
     }
 
+    @java.io.Serial
     final Object writeReplace() {
         return createProxy();
     }
 
-    abstract AbstractEnvelopeProxy<T> createProxy();
+    abstract @NonNull SerialForm<T, ?> createProxy();
 }
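
The new Envelope.SerialForm default methods pack sessionId and txSequence with WritableObjects.writeLongs() and unpack them via readLongHeader(), readFirstLong() and readSecondLong(). A hedged round-trip sketch using only the calls that appear in the hunk; the LongPairCodec wrapper and the byte-array stream plumbing are assumptions for illustration.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    // The two longs are written behind a shared header that readLongHeader() recovers.
    final class LongPairCodec {
        static byte[] write(final long sessionId, final long txSequence) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                WritableObjects.writeLongs(out, sessionId, txSequence);
            }
            return bytes.toByteArray();
        }

        static long[] read(final byte[] encoded) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(encoded))) {
                final byte header = WritableObjects.readLongHeader(in);
                return new long[] {
                    WritableObjects.readFirstLong(in, header),
                    WritableObjects.readSecondLong(in, header),
                };
            }
        }
    }
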
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java
new file mode 100644 (file)
index 0000000..3038437
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link FailureEnvelope}.
+ */
+final class FE implements ResponseEnvelope.SerialForm<RequestFailure<?, ?>, FailureEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private FailureEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FE() {
+        // for Externalizable
+    }
+
+    FE(final FailureEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public FailureEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final FailureEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public FailureEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final RequestFailure<?, ?> message, final long executionTimeNanos) {
+        return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java
new file mode 100644 (file)
index 0000000..1a3e72b
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FrontendIdentifier}.
+ */
+final class FI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private FrontendIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FI() {
+        // for Externalizable
+    }
+
+    FI(final FrontendIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new FrontendIdentifier(MemberName.readFrom(in), FrontendType.readFrom(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getMemberName().writeTo(out);
+        identifier.getClientType().writeTo(out);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java
new file mode 100644 (file)
index 0000000..9e900f7
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link FrontendType}.
+ */
+final class FT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private byte[] serialized;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FT() {
+        // for Externalizable
+    }
+
+    FT(final byte[] serialized) {
+        this.serialized = requireNonNull(serialized);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(serialized.length);
+        out.write(serialized);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        serialized = new byte[in.readInt()];
+        in.readFully(serialized);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        // TODO: consider caching instances here
+        return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
+    }
+}
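
FT serializes a FrontendType as a length-prefixed UTF-8 byte array: writeInt() plus write() on the way out, readInt() plus readFully() on the way back. A self-contained round-trip sketch of that framing using plain java.io; the LengthPrefixed wrapper is hypothetical.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    final class LengthPrefixed {
        static byte[] encode(final String name) throws IOException {
            final byte[] serialized = name.getBytes(StandardCharsets.UTF_8);
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                out.writeInt(serialized.length);   // length prefix
                out.write(serialized);             // payload
            }
            return bytes.toByteArray();
        }

        static String decode(final byte[] encoded) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(encoded))) {
                final byte[] serialized = new byte[in.readInt()];
                in.readFully(serialized);          // exact-length read
                return new String(serialized, StandardCharsets.UTF_8);
            }
        }
    }
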
index 1f641eb1819f945da32ee4a9b76a403e5e7bea3d..5342d05f5eb989e38787b90d8b8a4edc9ca151e9 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.access.concepts;
 
 public final class FailureEnvelope extends ResponseEnvelope<RequestFailure<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public FailureEnvelope(final RequestFailure<?, ?> message, final long sessionId, final long txSequence,
@@ -16,7 +17,7 @@ public final class FailureEnvelope extends ResponseEnvelope<RequestFailure<?, ?>
     }
 
     @Override
-    FailureEnvelopeProxy createProxy() {
-        return new FailureEnvelopeProxy(this);
+    FE createProxy() {
+        return new FE(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java
deleted file mode 100644 (file)
index adc50e1..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class FailureEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestFailure<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to be
-    // able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public FailureEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    FailureEnvelopeProxy(final FailureEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    ResponseEnvelope<RequestFailure<?, ?>> createEnvelope(final RequestFailure<?, ?> message, final long sessionId,
-            final long txSequence, final long executionTimeNanos) {
-        return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-}
index 10abac6d58236deffc1f6af0f46358b52481be40..76aad38da71e4d802b988771b292c7fc34a01dba 100644 (file)
@@ -9,59 +9,20 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.util.Objects;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 /**
  * A cluster-wide unique identifier of a frontend type located at a cluster member.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FrontendIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private MemberName memberName;
-        private FrontendType clientType;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // Needed for Externalizable
-        }
-
-        Proxy(final MemberName memberName, final FrontendType clientType) {
-            this.memberName = requireNonNull(memberName);
-            this.clientType = requireNonNull(clientType);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            memberName.writeTo(out);
-            clientType.writeTo(out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            memberName = MemberName.readFrom(in);
-            clientType = FrontendType.readFrom(in);
-        }
-
-        private Object readResolve() {
-            return new FrontendIdentifier(memberName, clientType);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final MemberName memberName;
     private final FrontendType clientType;
 
@@ -75,8 +36,8 @@ public final class FrontendIdentifier implements WritableIdentifier {
     }
 
     public static @NonNull FrontendIdentifier readFrom(final DataInput in) throws IOException {
-        final MemberName memberName = MemberName.readFrom(in);
-        final FrontendType clientType = FrontendType.readFrom(in);
+        final var memberName = MemberName.readFrom(in);
+        final var clientType = FrontendType.readFrom(in);
         return new FrontendIdentifier(memberName, clientType);
     }
 
@@ -101,15 +62,8 @@ public final class FrontendIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof FrontendIdentifier)) {
-            return false;
-        }
-
-        final FrontendIdentifier other = (FrontendIdentifier) obj;
-        return memberName.equals(other.memberName) && clientType.equals(other.clientType);
+        return this == obj || obj instanceof FrontendIdentifier other && memberName.equals(other.memberName)
+            && clientType.equals(other.clientType);
     }
 
     public @NonNull String toPersistentId() {
@@ -121,7 +75,8 @@ public final class FrontendIdentifier implements WritableIdentifier {
         return toPersistentId();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(memberName, clientType);
+        return new FI(this);
     }
 }
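
The equals() hunk above collapses the separate null check, type check and cast into a single pattern-matching instanceof expression (Java 16+). A minimal sketch of the idiom on a hypothetical two-field value class; MemberRef is not an ODL type.

    // The pattern variable 'other' is bound only when the instanceof test succeeds,
    // so the chained field comparisons never see a value of the wrong type.
    final class MemberRef {
        private final String name;
        private final int generation;

        MemberRef(final String name, final int generation) {
            this.name = name;
            this.generation = generation;
        }

        @Override
        public boolean equals(final Object obj) {
            return this == obj || obj instanceof MemberRef other
                && generation == other.generation && name.equals(other.name);
        }

        @Override
        public int hashCode() {
            return 31 * name.hashCode() + generation;
        }
    }
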
index 2a2a5b2b30af2d26bd4c39e75844401611d32129..66191816136573bdce79299a33d388012b163cf3 100644 (file)
@@ -11,16 +11,12 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Strings;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.nio.charset.StandardCharsets;
 import java.util.regex.Pattern;
 import org.eclipse.jdt.annotation.NonNull;
@@ -31,47 +27,12 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * An {@link Identifier} identifying a data store frontend type, which is able to access the data store backend.
  * Frontend implementations need to define this identifier so that multiple clients existing on a member node can be
  * discerned.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FrontendType implements Comparable<FrontendType>, WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            this.serialized = requireNonNull(serialized);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            serialized = new byte[in.readInt()];
-            in.readFully(serialized);
-        }
-
-        private Object readResolve() {
-            // TODO: consider caching instances here
-            return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
-        }
-    }
-
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
     private static final String SIMPLE_STRING_REGEX = "^[a-zA-Z0-9-_.*+:=,!~';]+$";
     private static final Pattern SIMPLE_STRING_PATTERN = Pattern.compile(SIMPLE_STRING_REGEX);
-    private static final long serialVersionUID = 1L;
 
     private final @NonNull String name;
 
@@ -157,7 +118,8 @@ public final class FrontendType implements Comparable<FrontendType>, WritableIde
         return local;
     }
 
-    Object writeReplace() {
-        return new Proxy(getSerialized());
+    @java.io.Serial
+    private Object writeReplace() {
+        return new FT(getSerialized());
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java
new file mode 100644 (file)
index 0000000..ab4d884
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link LocalHistoryIdentifier}.
+ *
+ * @implNote
+ *     cookie is currently required only for module-based sharding, which is implemented as part of normal
+ *     DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence we may end up not needing
+ *     cookie at all.
+ *     We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we end up not needing
+ *     the cookie at all, we can switch to writeLong() and use zero flags for compatibility.
+ */
+final class HI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HI() {
+        // for Externalizable
+    }
+
+    HI(final LocalHistoryIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getClientId().writeTo(out);
+        WritableObjects.writeLongs(out, identifier.getHistoryId(), identifier.getCookie());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final var clientId = ClientIdentifier.readFrom(in);
+        final byte header = WritableObjects.readLongHeader(in);
+        final var historyId = WritableObjects.readFirstLong(in, header);
+        final var cookie = WritableObjects.readSecondLong(in, header);
+        identifier = new LocalHistoryIdentifier(clientId, historyId, cookie);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
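
As a standalone sketch of the two-long encoding the @implNote above describes, the round trip below uses the same WritableObjects calls as HI.writeExternal()/readExternal(); the byte-array streams around them are illustrative only and not part of this patch:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class TwoLongRoundTrip {
        private TwoLongRoundTrip() {
            // utility class
        }

        static long[] roundTrip(final long historyId, final long cookie) throws IOException {
            // Write both longs behind a shared header, historyId first, then cookie
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                WritableObjects.writeLongs(out, historyId, cookie);
            }

            // Read them back in the same order: the header first, then the two values
            try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                final byte header = WritableObjects.readLongHeader(in);
                final long first = WritableObjects.readFirstLong(in, header);   // historyId
                final long second = WritableObjects.readSecondLong(in, header); // cookie
                return new long[] { first, second };
            }
        }
    }

If the cookie is ever dropped, the fallback mentioned in the @implNote would simply replace the writeLongs() call here with writeLong().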
index 137bf5907f3ca985170f482d49ae48d86debbc56..ddeb2936151b9b8b7f42affa9648a2e53d6a0cb4 100644 (file)
@@ -12,10 +12,7 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.base.MoreObjects;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 import org.opendaylight.yangtools.concepts.WritableObjects;
@@ -25,59 +22,11 @@ import org.opendaylight.yangtools.concepts.WritableObjects;
  * - a {@link ClientIdentifier}, which uniquely identifies a single instantiation of a particular frontend
  * - an unsigned long, which uniquely identifies the history on the backend
  * - an unsigned long cookie, assigned by the client and meaningless on the backend, which just reflects it back
- *
- * @author Robert Varga
  */
 public final class LocalHistoryIdentifier implements WritableIdentifier {
-    /*
-     * Implementation note: cookie is currently required only for module-based sharding, which is implemented as part
-     *                      of normal DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence
-     *                      we may end up not needing cookie at all.
-     *
-     *                      We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we
-     *                      end up not needing the cookie at all, we can switch to writeLong() and use zero flags for
-     *                      compatibility.
-     */
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private ClientIdentifier clientId;
-        private long historyId;
-        private long cookie;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ClientIdentifier frontendId, final long historyId, final long cookie) {
-            clientId = requireNonNull(frontendId);
-            this.historyId = historyId;
-            this.cookie = cookie;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            clientId.writeTo(out);
-            WritableObjects.writeLongs(out, historyId, cookie);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            clientId = ClientIdentifier.readFrom(in);
-
-            final byte header = WritableObjects.readLongHeader(in);
-            historyId = WritableObjects.readFirstLong(in, header);
-            cookie = WritableObjects.readSecondLong(in, header);
-        }
-
-        private Object readResolve() {
-            return new LocalHistoryIdentifier(clientId, historyId, cookie);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull ClientIdentifier clientId;
     private final long historyId;
     private final long cookie;
@@ -131,11 +80,10 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
         if (this == obj) {
             return true;
         }
-        if (!(obj instanceof LocalHistoryIdentifier)) {
+        if (!(obj instanceof LocalHistoryIdentifier other)) {
             return false;
         }
 
-        final LocalHistoryIdentifier other = (LocalHistoryIdentifier) obj;
         return historyId == other.historyId && cookie == other.cookie && clientId.equals(other.clientId);
     }
 
@@ -146,7 +94,8 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
                 .add("cookie", Long.toUnsignedString(cookie, 16)).toString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(clientId, historyId, cookie);
+        return new HI(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java
new file mode 100644 (file)
index 0000000..37b9fb8
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link MemberName}.
+ */
+final class MN implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private byte[] serialized;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MN() {
+        // for Externalizable
+    }
+
+    MN(final byte[] serialized) {
+        this.serialized = requireNonNull(serialized);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(serialized.length);
+        out.write(serialized);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        serialized = new byte[in.readInt()];
+        in.readFully(serialized);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        // TODO: consider caching instances here
+        return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
+    }
+}
index 3cc2b0c6398ec59bde76877648e40bc7e4e71c27..daab643f8a6e13647d7e92554718a49e70b14570 100644 (file)
@@ -11,60 +11,21 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Strings;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.nio.charset.StandardCharsets;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 /**
  * Type-safe encapsulation of a cluster member name.
- *
- * @author Robert Varga
  */
-@Beta
 public final class MemberName implements Comparable<MemberName>, WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            this.serialized = requireNonNull(serialized);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            serialized = new byte[in.readInt()];
-            in.readFully(serialized);
-        }
-
-        private Object readResolve() {
-            // TODO: consider caching instances here
-            return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull String name;
@@ -140,7 +101,8 @@ public final class MemberName implements Comparable<MemberName>, WritableIdentif
         return local;
     }
 
+    @java.io.Serial
     Object writeReplace() {
-        return new Proxy(getSerialized());
+        return new MN(getSerialized());
     }
 }
index 5456fbb19ee97cffeb90da346ebfab99e4b32d70..9748264e7fc0144bd2e9780599418600f2c28ae3 100644 (file)
@@ -10,15 +10,24 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.NotSerializableException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.io.Serializable;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.Immutable;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * An abstract concept of a Message. This class cannot be instantiated directly, use its specializations {@link Request}
@@ -47,14 +56,49 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * Note that this class specifies the {@link Immutable} contract, which means that all subclasses must follow this API
  * contract.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
-public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>> implements Immutable,
-        Serializable {
+public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>>
+        implements Immutable, Serializable {
+    /**
+     * Externalizable proxy for use with {@link Message} subclasses.
+     *
+     * @param <T> Target identifier type
+     * @param <C> Message class
+     */
+    protected interface SerialForm<T extends WritableIdentifier, C extends Message<T, C>> extends Externalizable {
+
+        @NonNull C message();
+
+        void setMessage(@NonNull C message);
+
+        @Override
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            final var message = message();
+            message.getTarget().writeTo(out);
+            WritableObjects.writeLong(out, message.getSequence());
+            writeExternal(out, message);
+        }
+
+        void writeExternal(@NonNull ObjectOutput out, @NonNull C msg) throws IOException;
+
+        @Override
+        default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final var target = verifyNotNull(readTarget(in));
+            final var sequence = WritableObjects.readLong(in);
+            setMessage(verifyNotNull(readExternal(in, target, sequence)));
+        }
+
+        @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence)
+            throws IOException, ClassNotFoundException;
+
+        Object readResolve();
+
+        @NonNull T readTarget(@NonNull DataInput in) throws IOException;
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull ABIVersion version;
@@ -106,21 +150,14 @@ public abstract class Message<T extends WritableIdentifier, C extends Message<T,
      */
     @SuppressWarnings("unchecked")
     public final @NonNull C toVersion(final @NonNull ABIVersion toVersion) {
-        if (this.version == toVersion) {
+        if (version == toVersion) {
             return (C)this;
         }
 
-        switch (toVersion) {
-            case BORON:
-            case NEON_SR2:
-            case SODIUM_SR1:
-            case MAGNESIUM:
-                return verifyNotNull(cloneAsVersion(toVersion));
-            case TEST_PAST_VERSION:
-            case TEST_FUTURE_VERSION:
-            default:
-                throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
-        }
+        return switch (toVersion) {
+            case POTASSIUM -> verifyNotNull(cloneAsVersion(toVersion));
+            default -> throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
+        };
     }
 
     /**
@@ -159,9 +196,29 @@ public abstract class Message<T extends WritableIdentifier, C extends Message<T,
      * @param reqVersion Requested ABI version
      * @return Proxy for this object
      */
-    abstract @NonNull AbstractMessageProxy<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
+    protected abstract @NonNull SerialForm<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
 
+    @java.io.Serial
     protected final Object writeReplace() {
         return externalizableProxy(version);
     }
+
+    protected final void throwNSE() throws NotSerializableException {
+        throw new NotSerializableException(getClass().getName());
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
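
Taken together, the new SerialForm contract and the readObject()/writeObject()/readObjectNoData() methods that unconditionally throwNSE() commit Message fully to the serialization proxy pattern: writeReplace() swaps the message for its proxy before any bytes are written, the proxy's readResolve() swaps it back on the way in, and direct (de)serialization of the message itself is rejected. A minimal self-contained sketch of that hand-off, using hypothetical Msg/MsgForm classes rather than anything from this patch:

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.NotSerializableException;
    import java.io.ObjectInput;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    // Hypothetical message class: only its proxy ever appears in a serialization stream.
    final class Msg implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        final String payload;

        Msg(final String payload) {
            this.payload = payload;
        }

        @java.io.Serial
        private Object writeReplace() {
            // ObjectOutputStream serializes MsgForm in place of Msg
            return new MsgForm(this);
        }

        @java.io.Serial
        private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
            // A stream claiming to contain a raw Msg is rejected outright
            throw new NotSerializableException(getClass().getName());
        }
    }

    // Hypothetical proxy playing the role of a SerialForm implementation.
    final class MsgForm implements Externalizable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private Msg msg;

        public MsgForm() {
            // for Externalizable
        }

        MsgForm(final Msg msg) {
            this.msg = msg;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeUTF(msg.payload);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            msg = new Msg(in.readUTF());
        }

        @java.io.Serial
        private Object readResolve() {
            // Deserialization hands callers a Msg again, never the proxy
            return msg;
        }
    }

An ObjectOutputStream/ObjectInputStream round trip of a Msg therefore only ever writes MsgForm's fields, mirroring how Message defers to externalizableProxy(version) above.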
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java
new file mode 100644 (file)
index 0000000..27bf825
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link RequestEnvelope}.
+ */
+final class RE implements Envelope.SerialForm<Request<?, ?>, RequestEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private RequestEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RE() {
+        // for Externalizable
+    }
+
+    RE(final RequestEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public RequestEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final RequestEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public RequestEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final Request<?, ?> message) {
+        return new RequestEnvelope(message, sessionId, txSequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
index 093a3f1eb0c1cdedb4c957cfdb81c85b639ac6f2..97ce498bda202b9cf9f2e2b79ab0b65146ebdd0f 100644 (file)
@@ -10,8 +10,12 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -20,14 +24,31 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * A request message concept. Upon receipt of this message, the recipient will respond with either
  * a {@link RequestSuccess} or a {@link RequestFailure} message.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
 public abstract class Request<T extends WritableIdentifier, C extends Request<T, C>> extends Message<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends Request<T, C>>
+            extends Message.SerialForm<T, C> {
+        @Override
+        default C readExternal(final ObjectInput in, final T target, final long sequence)
+                throws ClassNotFoundException, IOException {
+            return readExternal(in, target, sequence,
+                JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject()));
+        }
+
+        @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence, @NonNull ActorRef replyTo)
+            throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            out.writeObject(Serialization.serializedActorPath(msg.getReplyTo()));
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull ActorRef replyTo;
 
     protected Request(final @NonNull T target, final long sequence, final @NonNull ActorRef replyTo) {
@@ -63,5 +84,5 @@ public abstract class Request<T extends WritableIdentifier, C extends Request<T,
     }
 
     @Override
-    protected abstract AbstractRequestProxy<T, C> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
 }
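
The SerialForm defaults above also settle how the replyTo reference crosses the wire: it is written as Akka's serialized actor path and resolved back through the actor-ref provider on read. A small sketch of that pair of calls follows; the helper class and its names are illustrative, and the ExtendedActorSystem is passed in explicitly here, whereas the SerialForm obtains it from JavaSerializer.currentSystem(), which Akka binds while deserialization is in progress:

    import akka.actor.ActorRef;
    import akka.actor.ExtendedActorSystem;
    import akka.serialization.Serialization;

    final class ReplyToCodec {
        private ReplyToCodec() {
            // utility class
        }

        static String encode(final ActorRef replyTo) {
            // Full actor path including address and UID, suitable for resolution on the peer
            return Serialization.serializedActorPath(replyTo);
        }

        static ActorRef decode(final ExtendedActorSystem system, final String path) {
            // Resolves the path back to a live ActorRef on the receiving system
            return system.provider().resolveActorRef(path);
        }
    }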
index 46d5d1f99620b83304b49e7d160a6648768dce65..e8983697cf659e56ad99fa347278d8c0df58e596 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.access.concepts;
 import akka.actor.ActorRef;
 
 public final class RequestEnvelope extends Envelope<Request<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RequestEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
@@ -17,8 +18,8 @@ public final class RequestEnvelope extends Envelope<Request<?, ?>> {
     }
 
     @Override
-    RequestEnvelopeProxy createProxy() {
-        return new RequestEnvelopeProxy(this);
+    RE createProxy() {
+        return new RE(this);
     }
 
     /**
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java
deleted file mode 100644 (file)
index 66e7eaa..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class RequestEnvelopeProxy extends AbstractEnvelopeProxy<Request<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public RequestEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    RequestEnvelopeProxy(final RequestEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    RequestEnvelope createEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
-        return new RequestEnvelope(message, sessionId, txSequence);
-    }
-}
index 7267edea4f5d19e7bc10137b37857c849e2200b2..d1120c61ef468f8060cc6b67261b895f5bda5c04 100644 (file)
@@ -9,16 +9,13 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * A failure cause behind a {@link RequestFailure} to process a {@link Request}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class RequestException extends Exception {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     protected RequestException(final @NonNull String message) {
index db5a15b0bc57c5cda03bfd05ffaed53db7550024..7204912669cabf88596be24a5e82e385a4e1dcaf 100644 (file)
@@ -9,8 +9,10 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -18,14 +20,33 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 /**
  * A failure response to a {@link Request}. Contains a {@link RequestException} detailing the cause for this failure.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message class
  */
-@Beta
 public abstract class RequestFailure<T extends WritableIdentifier, C extends RequestFailure<T, C>>
         extends Response<T, C> {
+    /**
+     * Externalizable proxy for use with {@link RequestFailure} subclasses.
+     *
+     * @param <T> Target identifier type
+     */
+    protected interface SerialForm<T extends WritableIdentifier, C extends RequestFailure<T, C>>
+            extends Message.SerialForm<T, C> {
+        @Override
+        default C readExternal(final ObjectInput in, final T target, final long sequence)
+                throws IOException, ClassNotFoundException {
+            return createFailure(target, sequence, (RequestException) in.readObject());
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            out.writeObject(msg.getCause());
+        }
+
+        @NonNull C createFailure(@NonNull T target, long sequence, @NonNull RequestException failureCause);
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull RequestException cause;
@@ -65,5 +86,5 @@ public abstract class RequestFailure<T extends WritableIdentifier, C extends Req
     }
 
     @Override
-    protected abstract AbstractRequestFailureProxy<T, C> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
 }
index 9b60d21b0d2fc13b9aa391ac6460629f457c9d33..f7e59ed1e6597019c43a04826fc5f006fbfea7d9 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -15,23 +16,26 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 /**
  * A successful reply to a {@link Request}.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  */
-@Beta
-public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>> extends
-        Response<T, C> {
+public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+        extends Response<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+            extends Response.SerialForm<T, C> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            // Defaults to no-op
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    protected RequestSuccess(final @NonNull C success,  final @NonNull ABIVersion version) {
+    protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) {
         super(success, version);
     }
 
     protected RequestSuccess(final @NonNull T target, final long sequence) {
         super(target, sequence);
     }
-
-    @Override
-    protected abstract AbstractSuccessProxy<T, C> externalizableProxy(ABIVersion version);
 }
index f733a9e9199b31c1ed8b4afeb84549c8685bad78..a41fa01db99d2a9c1701be940ce52115a43b2eb4 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -17,13 +16,16 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * {@link RequestFailure} and {@link RequestSuccess}, which provide appropriate specialization. It is visible purely for
  * the purpose of allowing to check if an object is either of those specializations with a single instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
 public abstract class Response<T extends WritableIdentifier, C extends Response<T, C>> extends Message<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends Response<T, C>>
+            extends Message.SerialForm<T, C> {
+
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     Response(final @NonNull T target, final long sequence) {
@@ -33,7 +35,4 @@ public abstract class Response<T extends WritableIdentifier, C extends Response<
     Response(final @NonNull C response, final @NonNull ABIVersion version) {
         super(response, version);
     }
-
-    @Override
-    abstract AbstractResponseProxy<T, C> externalizableProxy(ABIVersion version);
 }
index 7936baa1696059b184b8ff55f548ac5fd6e9229b..50d1e7434ce1c845fb9c5948f0759b41d526fb88 100644 (file)
@@ -7,16 +7,39 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 public abstract class ResponseEnvelope<T extends Response<?, ?>> extends Envelope<T> {
+    interface SerialForm<T extends Response<?, ?>, E extends ResponseEnvelope<T>> extends Envelope.SerialForm<T, E> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+            Envelope.SerialForm.super.writeExternal(out, envelope);
+            WritableObjects.writeLong(out, envelope.getExecutionTimeNanos());
+        }
+
+        @Override
+        default E readExternal(final ObjectInput in, final long sessionId, final long txSequence, final T message)
+                throws IOException {
+            return readExternal(in, sessionId, txSequence, message, WritableObjects.readLong(in));
+        }
+
+        E readExternal(ObjectInput in, long sessionId, long txSequence, T message, long executionTimeNanos);
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long executionTimeNanos;
 
     ResponseEnvelope(final T message, final long sessionId, final long txSequence, final long executionTimeNanos) {
         super(message, sessionId, txSequence);
-        Preconditions.checkArgument(executionTimeNanos >= 0);
+        checkArgument(executionTimeNanos >= 0, "Negative executionTime");
         this.executionTimeNanos = executionTimeNanos;
     }
 
@@ -29,7 +52,4 @@ public abstract class ResponseEnvelope<T extends Response<?, ?>> extends Envelop
     public final long getExecutionTimeNanos() {
         return executionTimeNanos;
     }
-
-    @Override
-    abstract AbstractResponseEnvelopeProxy<T> createProxy();
 }
index 7730318f578d31c02272abac6faad7ff193f4f99..3f1f71d17e830beaa572a054421df84841003480 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * General error raised when the recipient of a {@link Request} determines that the request contains
  * a {@link ClientIdentifier} which corresponds to an outdated generation.
- *
- * @author Robert Varga
  */
-@Beta
 public final class RetiredGenerationException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RetiredGenerationException(final long originatingGeneration, final long newGeneration) {
index 7fb0ef070112ed34a2e333723f51815044e0764e..3f886a85102c5813bed14fb3a84b11791dfaa4ef 100644 (file)
@@ -10,16 +10,13 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.Strings;
 
 /**
  * General error raised when the recipient of a {@link Request} fails to process a request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class RuntimeRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RuntimeRequestException(final String message, final Throwable cause) {
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java
new file mode 100644 (file)
index 0000000..3e8ce6f
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link SuccessEnvelope}.
+ */
+final class SE implements ResponseEnvelope.SerialForm<RequestSuccess<?, ?>, SuccessEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SuccessEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SE() {
+        // for Externalizable
+    }
+
+    SE(final SuccessEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public SuccessEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final SuccessEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public SuccessEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final RequestSuccess<?, ?> message, final long executionTimeNanos) {
+        return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
index cd3e2608d60ea03cb052dcdfa8a0f52d69d69ad5..118e9262a7dad58bc06d64435da0ac810ee723f7 100644 (file)
@@ -7,14 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * A tagging interface that specifies a message whose serialized size can be large and thus should be sliced into
  * smaller chunks when transporting over the wire.
  *
  * @author Thomas Pantelis
  */
-@Beta
 public interface SliceableMessage {
+    // Marker interface
 }
index 3c23a23763cd071abb2e967f7c58b6e9cd633ca1..2644c6ff0f8856a550ec8cdacbe227e4afa538ca 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.access.concepts;
 
 public final class SuccessEnvelope extends ResponseEnvelope<RequestSuccess<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public SuccessEnvelope(final RequestSuccess<?, ?> message, final long sessionId, final long txSequence,
@@ -16,7 +17,7 @@ public final class SuccessEnvelope extends ResponseEnvelope<RequestSuccess<?, ?>
     }
 
     @Override
-    SuccessEnvelopeProxy createProxy() {
-        return new SuccessEnvelopeProxy(this);
+    SE createProxy() {
+        return new SE(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java
deleted file mode 100644 (file)
index 3ac388b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class SuccessEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestSuccess<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public SuccessEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    SuccessEnvelopeProxy(final SuccessEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    ResponseEnvelope<RequestSuccess<?, ?>> createEnvelope(final RequestSuccess<?, ?> message, final long sessionId,
-            final long txSequence, final long executionTimeNanos) {
-        return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java
new file mode 100644 (file)
index 0000000..8bc927f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link TransactionIdentifier}.
+ */
+final class TI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TI() {
+        // for Externalizable
+    }
+
+    TI(final TransactionIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new TransactionIdentifier(LocalHistoryIdentifier.readFrom(in), WritableObjects.readLong(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getHistoryId().writeTo(out);
+        WritableObjects.writeLong(out, identifier.getTransactionId());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
index d2a92ea1913374a2dfa693195f27f1916938d347..ea72c847501a79a8982af88b1fd6aadfc90784c4 100644 (file)
@@ -9,59 +9,20 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * Globally-unique identifier of a transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private LocalHistoryIdentifier historyId;
-        private long transactionId;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final LocalHistoryIdentifier historyId, final long transactionId) {
-            this.historyId = requireNonNull(historyId);
-            this.transactionId = transactionId;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            historyId.writeTo(out);
-            WritableObjects.writeLong(out, transactionId);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            historyId = LocalHistoryIdentifier.readFrom(in);
-            transactionId = WritableObjects.readLong(in);
-        }
-
-        private Object readResolve() {
-            return new TransactionIdentifier(historyId, transactionId);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull LocalHistoryIdentifier historyId;
     private final long transactionId;
     private String shortString;
@@ -97,15 +58,8 @@ public final class TransactionIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof TransactionIdentifier)) {
-            return false;
-        }
-
-        final TransactionIdentifier other = (TransactionIdentifier) obj;
-        return transactionId == other.transactionId && historyId.equals(other.historyId);
+        return this == obj || obj instanceof TransactionIdentifier other && transactionId == other.transactionId
+            && historyId.equals(other.historyId);
     }
 
     public String toShortString() {
@@ -125,7 +79,8 @@ public final class TransactionIdentifier implements WritableIdentifier {
         return toShortString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(historyId, transactionId);
+        return new TI(this);
     }
 }
index 903ed59fbc04c00d3f7121d11edfc11875dc76b7..1de266d2851d2f14975f86f64f5a4b20971fefd4 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * General error raised when the recipient of a {@link Request} determines that it does not know how to handle
  * the request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class UnsupportedRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public UnsupportedRequestException(final Request<?, ?> request) {
index f9e9c0c95473e7d39f7bd4315ae2a434f32aba4d..1513f363969aa54331d7469c4b5ce228136139de 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.access;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.cluster.access.ABIVersion.BORON;
+import static org.opendaylight.controller.cluster.access.ABIVersion.POTASSIUM;
 import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_FUTURE_VERSION;
 import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_PAST_VERSION;
 
@@ -22,25 +23,25 @@ public class ABIVersionTest {
     @Test
     public void testInvalidVersions() {
         assertTrue(TEST_PAST_VERSION.compareTo(TEST_FUTURE_VERSION) < 0);
-        assertTrue(TEST_PAST_VERSION.compareTo(BORON) < 0);
-        assertTrue(TEST_FUTURE_VERSION.compareTo(BORON) > 0);
+        assertTrue(TEST_PAST_VERSION.compareTo(POTASSIUM) < 0);
+        assertTrue(TEST_FUTURE_VERSION.compareTo(POTASSIUM) > 0);
     }
 
     @Test
-    public void testBoronVersion() throws Exception {
-        assertEquals((short)5, BORON.shortValue());
-        assertEquals(BORON, ABIVersion.valueOf(BORON.shortValue()));
-        assertEquals(BORON, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(BORON))));
+    public void testPotassiumVersion() throws Exception {
+        assertEquals((short)10, POTASSIUM.shortValue());
+        assertEquals(POTASSIUM, ABIVersion.valueOf(POTASSIUM.shortValue()));
+        assertEquals(POTASSIUM, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(POTASSIUM))));
     }
 
-    @Test(expected = PastVersionException.class)
-    public void testInvalidPastVersion() throws Exception {
-        ABIVersion.valueOf(TEST_PAST_VERSION.shortValue());
+    @Test
+    public void testInvalidPastVersion() {
+        assertThrows(PastVersionException.class, () -> ABIVersion.valueOf(TEST_PAST_VERSION.shortValue()));
     }
 
-    @Test(expected = FutureVersionException.class)
-    public void testInvalidFutureVersion() throws Exception {
-        ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue());
+    @Test
+    public void testInvalidFutureVersion() {
+        assertThrows(FutureVersionException.class, () -> ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue()));
     }
 
     private static byte[] writeVersion(final ABIVersion version) {
@@ -49,8 +50,9 @@ public class ABIVersionTest {
         return bado.toByteArray();
     }
 
-    @Test(expected = IOException.class)
-    public void testBadRead() throws IOException {
-        ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION)));
+    @Test
+    public void testBadRead() {
+        final var in = ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION));
+        assertThrows(IOException.class, () -> ABIVersion.readFrom(in));
     }
 }
index 60e7dc83884c15f7bce09bf280307c54bee28ffa..48465208e21a27007fbe9512cfd1c63a200e213e 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -25,14 +26,12 @@ public class AbortLocalTransactionRequestTest
 
     private static final AbortLocalTransactionRequest OBJECT = new AbortLocalTransactionRequest(TRANSACTION, ACTOR_REF);
 
-    @Override
-    protected AbortLocalTransactionRequest object() {
-        return OBJECT;
+    public AbortLocalTransactionRequestTest() {
+        super(OBJECT);
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof AbortLocalTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((AbortLocalTransactionRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final AbortLocalTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index e40a39450b78c7b5462cae45b2cf9407c369a9cc..1cb9af38a01330d74bccf98e667d7771db275542 100644 (file)
@@ -7,24 +7,34 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public abstract class AbstractLocalTransactionRequestTest<T extends AbstractLocalTransactionRequest<T>>
         extends AbstractTransactionRequestTest<T> {
-    @Override
-    protected abstract T object();
+    protected AbstractLocalTransactionRequestTest(final T object) {
+        super(object, -1);
+    }
 
     @Test
     public void cloneAsVersionTest() {
-        Assert.assertEquals(object(), object().cloneAsVersion(ABIVersion.BORON));
+        assertSame(object(), object().cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION));
     }
 
     @Override
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void serializationTest() {
-        SerializationUtils.clone(object());
+        final var ex = assertThrows(UnsupportedOperationException.class, () -> SerializationUtils.clone(object()));
+        assertThat(ex.getMessage(), allOf(
+            startsWith("Local transaction request "),
+            endsWith(" should never be serialized")));
     }
 }
index f1fe2c08f911f7fe3f05bfb682d99d7325071d66..5ae72198880b0c7c31f854b9ddca6e2e3d32daa1 100644 (file)
@@ -7,32 +7,36 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 public abstract class AbstractReadTransactionRequestTest<T extends AbstractReadPathTransactionRequest<T>>
         extends AbstractTransactionRequestTest<T> {
-    protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+    protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
     protected static final boolean SNAPSHOT_ONLY = true;
 
-    @Override
-    protected abstract T object();
+    protected AbstractReadTransactionRequestTest(final T object, final int baseSize) {
+        super(object, baseSize);
+    }
 
     @Test
     public void getPathTest() {
-        Assert.assertEquals(PATH, object().getPath());
+        assertEquals(PATH, object().getPath());
     }
 
     @Test
     public void isSnapshotOnlyTest() {
-        Assert.assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
+        assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
-        Assert.assertTrue(result.toString().contains("path=" + PATH));
+        final var result = object().addToStringAttributes(MoreObjects.toStringHelper(object())).toString();
+        assertThat(result, containsString("path=" + PATH));
     }
 }
index ccdc6753ac640600d972d4591ba8246814ae96d8..78456b246afaf9014776b047ac33dfec48952bed 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -29,26 +32,36 @@ public abstract class AbstractRequestFailureTest<T extends RequestFailure<?, T>>
     protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
             HISTORY_IDENTIFIER, 0);
     protected static final RequestException CAUSE = new RuntimeRequestException("fail", new Throwable());
+    private static final int CAUSE_SIZE = SerializationUtils.serialize(CAUSE).length;
+
+    private final T object;
+    private final int expectedSize;
 
-    abstract T object();
+    protected AbstractRequestFailureTest(final T object, final int baseSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = baseSize + CAUSE_SIZE;
+    }
 
     @Test
     public void getCauseTest() {
-        Assert.assertEquals(CAUSE, object().getCause());
+        assertEquals(CAUSE, object.getCause());
     }
 
     @Test
     public void isHardFailureTest() {
-        Assert.assertTrue(object().isHardFailure());
+        assertTrue(object.isHardFailure());
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final var bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+
+        @SuppressWarnings("unchecked")
+        final var deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
     }
 }
index 8a812522336588517faff0a1a490196a1c46c077..b0038758c787e75033d67ddd72d339603e9a44fc 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -18,25 +21,34 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 
 public abstract class AbstractRequestSuccessTest<T extends RequestSuccess<?, T>> {
-
     private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
             MemberName.forName("test"), FrontendType.forName("one"));
     protected static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0);
-    protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(
-            CLIENT_IDENTIFIER, 0);
+    protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0);
+
+    private final @NonNull T object;
+    private final int expectedSize;
 
-    protected abstract T object();
+    protected AbstractRequestSuccessTest(final T object, final int expectedSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = expectedSize;
+    }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final var bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+
+        @SuppressWarnings("unchecked")
+        final var deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
         doAdditionalAssertions(deserialize);
     }
 
-    protected abstract void doAdditionalAssertions(Object deserialize);
+    protected void doAdditionalAssertions(final T deserialize) {
+        // No-op by default
+    }
 }
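// Illustrative sketch, not from the patch above: the refactored AbstractRequestSuccessTest
// pins the exact Java-serialization length of each message via commons-lang3. The snippet
// below reproduces that serialize/deserialize round trip on a hypothetical Serializable
// record, so the class name, field names and values are assumptions; only the
// SerializationUtils and JUnit 4 calls mirror the test code in the patch.
import static org.junit.Assert.assertEquals;

import java.io.Serializable;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;

public class SerializationRoundTripSketch {
    // Hypothetical message stand-in; records serialize via their canonical constructor.
    private record Sample(String target, long sequence) implements Serializable {
    }

    @Test
    public void roundTrip() {
        final var original = new Sample("member-1", 0L);

        // The byte length is deterministic for a fixed class shape, which is what lets
        // each concrete test above pass a hard-coded expectedSize to its super constructor.
        final byte[] bytes = SerializationUtils.serialize(original);

        // Deserialize and compare, as serializationTest() does for getTarget(),
        // getVersion() and getSequence().
        final Sample restored = SerializationUtils.deserialize(bytes);
        assertEquals(original, restored);
        assertEquals(original.sequence(), restored.sequence());
    }
}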
index 58d24e4e53838a2c19b674a9251d32fd86cfa245..f276ac3937763895d0c13ec74fb0b7ded97ad985 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -15,7 +16,6 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -29,14 +29,14 @@ public abstract class AbstractTransactionRequestTest<T extends TransactionReques
     protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
             HISTORY_IDENTIFIER, 0);
 
-    @Override
-    protected abstract T object();
+    protected AbstractTransactionRequestTest(final T object, final int baseSize) {
+        super(object, baseSize);
+    }
 
     @Test
     public void toRequestFailureTest() {
-        final Throwable cause = new Throwable();
-        final RequestException exception = new RuntimeRequestException("fail", cause);
-        final TransactionFailure failure = object().toRequestFailure(exception);
-        Assert.assertNotNull(failure);
+        final var exception = new RuntimeRequestException("fail", new Throwable());
+        final var failure = object().toRequestFailure(exception);
+        assertNotNull(failure);
     }
 }
index 08c9abb3af1d6905b904c74e03be822bde2e0cfd..4f00501750f55120599002bb63b3c03088928a0d 100644 (file)
@@ -11,8 +11,10 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 public abstract class AbstractTransactionSuccessTest<T extends TransactionSuccess<T>>
         extends AbstractRequestSuccessTest<T> {
+    protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_IDENTIFIER,
+        0);
 
-    protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
-            HISTORY_IDENTIFIER, 0);
-
+    protected AbstractTransactionSuccessTest(final T object, final int expectedSize) {
+        super(object, expectedSize);
+    }
 }
index 61b7dc2ad40b2e95ca6587b3ecc8789b11b4ca3a..f1df2d882f80132a138b7061feeb7f7b050d2789 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -17,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public class CommitLocalTransactionRequestTest
         extends AbstractLocalTransactionRequestTest<CommitLocalTransactionRequest> {
@@ -30,34 +33,32 @@ public class CommitLocalTransactionRequestTest
     private static final DataTreeModification MODIFICATION = Mockito.mock(DataTreeModification.class);
     private static final boolean COORDINATED = true;
 
-    private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(
-            TRANSACTION, 0, ACTOR_REF, MODIFICATION, null, COORDINATED);
+    private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(TRANSACTION, 0,
+        ACTOR_REF, MODIFICATION, null, COORDINATED);
 
-    @Override
-    protected CommitLocalTransactionRequest object() {
-        return OBJECT;
+    public CommitLocalTransactionRequestTest() {
+        super(OBJECT);
     }
 
     @Test
     public void getModificationTest() {
-        Assert.assertEquals(MODIFICATION, OBJECT.getModification());
+        assertEquals(MODIFICATION, OBJECT.getModification());
     }
 
     @Test
     public void isCoordinatedTest() {
-        Assert.assertEquals(COORDINATED, OBJECT.isCoordinated());
+        assertEquals(COORDINATED, OBJECT.isCoordinated());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("coordinated=" + COORDINATED));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("coordinated=" + COORDINATED));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof CommitLocalTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((CommitLocalTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getModification(), ((CommitLocalTransactionRequest) deserialize).getModification());
+    protected void doAdditionalAssertions(final CommitLocalTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getModification(), deserialize.getModification());
     }
 }
\ No newline at end of file
index 6518102fa144189992ec263174de9e12aba8f12b..2278195d826574f8061390fcdf5cfa623bd2ad2e 100644 (file)
@@ -7,23 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ConnectClientFailureTest extends AbstractRequestFailureTest<ConnectClientFailure> {
     private static final ConnectClientFailure OBJECT = new ConnectClientFailure(CLIENT_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    ConnectClientFailure object() {
-        return OBJECT;
+    public ConnectClientFailureTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
         final ConnectClientFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT.getTarget(), clone.getTarget());
-        Assert.assertEquals(OBJECT.getSequence(), clone.getSequence());
-        Assert.assertEquals(OBJECT.getCause(), clone.getCause());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
index 64e4717e85aa15595968560f04089b9a5951779a..3bf1951e5037bd4d1e1978b69ef3f5a597f809f7 100644 (file)
@@ -7,9 +7,13 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableRangeSet;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
@@ -17,7 +21,6 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 public class ConnectClientRequestTest extends AbstractRequestTest<ConnectClientRequest> {
     private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
@@ -30,49 +33,45 @@ public class ConnectClientRequestTest extends AbstractRequestTest<ConnectClientR
     private static final ConnectClientRequest OBJECT = new ConnectClientRequest(
             CLIENT_IDENTIFIER, 0, ACTOR_REF, MIN_VERSION, MAX_VERSION);
 
-    @Override
-    protected ConnectClientRequest object() {
-        return OBJECT;
+    public ConnectClientRequestTest() {
+        super(OBJECT, 112);
     }
 
     @Test
     public void getMinVersionTest() {
-        Assert.assertEquals(MIN_VERSION, OBJECT.getMinVersion());
+        assertEquals(MIN_VERSION, OBJECT.getMinVersion());
     }
 
     @Test
     public void getMaxVersionTest() {
-        Assert.assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
+        assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
     }
 
     @Test
     public void toRequestFailureTest() {
-        final RequestException exception = new DeadTransactionException(ImmutableRangeSet.of());
-        final ConnectClientFailure failure = OBJECT.toRequestFailure(exception);
-        Assert.assertNotNull(failure);
+        final var exception = new DeadTransactionException(ImmutableRangeSet.of());
+        final var failure = OBJECT.toRequestFailure(exception);
+        assertNotNull(failure);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ConnectClientRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertNotNull(clone);
-        Assert.assertEquals(ABIVersion.BORON, clone.getVersion());
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertNotNull(clone);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("minVersion=" + MIN_VERSION));
-        Assert.assertTrue(result.toString().contains("maxVersion=" + MAX_VERSION));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("minVersion=" + MIN_VERSION));
+        assertThat(result, containsString("maxVersion=" + MAX_VERSION));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ConnectClientRequest);
-        final ConnectClientRequest casted = (ConnectClientRequest) deserialize;
-
-        Assert.assertEquals(OBJECT.getMaxVersion(), casted.getMaxVersion());
-        Assert.assertEquals(OBJECT.getMinVersion(), casted.getMinVersion());
-        Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
+    protected void doAdditionalAssertions(final ConnectClientRequest deserialize) {
+        assertEquals(OBJECT.getMaxVersion(), deserialize.getMaxVersion());
+        assertEquals(OBJECT.getMinVersion(), deserialize.getMinVersion());
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index a1892eb35eb6a8d1084ea115f7f5adf5d9295ba5..0267b8eb273191155f499920700ab2b90523db06 100644 (file)
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
@@ -15,20 +18,17 @@ import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<ConnectClientSuccess> {
-
     private static final DataTree TREE = new InMemoryDataTreeFactory().create(
         DataTreeConfiguration.DEFAULT_OPERATIONAL);
     private static final ActorSystem SYSTEM = ActorSystem.create("test");
@@ -36,12 +36,11 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
     private static final ActorSelection ACTOR_SELECTION =  ActorSelection.apply(ACTOR_REF, "foo");
     private static final List<ActorSelection> ALTERNATES = ImmutableList.of(ACTOR_SELECTION);
     private static final int MAX_MESSAGES = 10;
-    private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(
-            CLIENT_IDENTIFIER, 0, ACTOR_REF, ALTERNATES, TREE, MAX_MESSAGES);
+    private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(CLIENT_IDENTIFIER, 0, ACTOR_REF,
+        ALTERNATES, TREE, MAX_MESSAGES);
 
-    @Override
-    protected ConnectClientSuccess object() {
-        return OBJECT;
+    public ConnectClientSuccessTest() {
+        super(OBJECT, 146 + ACTOR_REF.path().toSerializationFormat().length());
     }
 
     @Before
@@ -51,32 +50,36 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
 
     @Test
     public void testGetAlternates() {
-        final Collection<ActorSelection> alternates = OBJECT.getAlternates();
-        Assert.assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
+        final var alternates = OBJECT.getAlternates();
+        assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
     }
 
     @Test
     public void testGetBackend() {
         final ActorRef actorRef = OBJECT.getBackend();
-        Assert.assertEquals(ACTOR_REF, actorRef);
+        assertEquals(ACTOR_REF, actorRef);
     }
 
     @Test
     public void testGetDataTree() {
-        final ReadOnlyDataTree tree = OBJECT.getDataTree().get();
-        Assert.assertEquals(TREE, tree);
+        final ReadOnlyDataTree tree = OBJECT.getDataTree().orElseThrow();
+        assertEquals(TREE, tree);
     }
 
     @Test
     public void testGetMaxMessages() {
-        final int maxMessages = OBJECT.getMaxMessages();
-        Assert.assertEquals(MAX_MESSAGES, maxMessages);
+        assertEquals(MAX_MESSAGES, OBJECT.getMaxMessages());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ConnectClientSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getAlternates(), clone.getAlternates());
+        assertEquals(OBJECT.getBackend(), clone.getBackend());
+        assertEquals(OBJECT.getDataTree(), clone.getDataTree());
+        assertEquals(OBJECT.getMaxMessages(), clone.getMaxMessages());
     }
 
     @Test
@@ -86,11 +89,10 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ConnectClientSuccess);
-        Assert.assertEquals(OBJECT.getAlternates().size(), ((ConnectClientSuccess) deserialize).getAlternates().size());
-        Assert.assertEquals(OBJECT.getBackend(), ((ConnectClientSuccess) deserialize).getBackend());
-        Assert.assertEquals(Optional.empty(), ((ConnectClientSuccess) deserialize).getDataTree());
-        Assert.assertEquals(OBJECT.getMaxMessages(), ((ConnectClientSuccess) deserialize).getMaxMessages());
+    protected void doAdditionalAssertions(final ConnectClientSuccess deserialize) {
+        assertEquals(OBJECT.getAlternates().size(), deserialize.getAlternates().size());
+        assertEquals(OBJECT.getBackend(), deserialize.getBackend());
+        assertEquals(Optional.empty(), deserialize.getDataTree());
+        assertEquals(OBJECT.getMaxMessages(), deserialize.getMaxMessages());
     }
 }
index 2682c9df5982f7cd56d80e3bd2def73968f5e76b..f3e8aa0db6f91a751f328bdbd0104aef364b2581 100644 (file)
@@ -7,32 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ExistsTransactionRequestTest extends AbstractReadTransactionRequestTest<ExistsTransactionRequest> {
-    private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+    private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, PATH, SNAPSHOT_ONLY);
 
-    @Override
-    protected ExistsTransactionRequest object() {
-        return OBJECT;
+    public ExistsTransactionRequestTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
-        final ExistsTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
-        Assert.assertEquals(cloneVersion, clone.getVersion());
-        Assert.assertEquals(OBJECT.getPath(), clone.getPath());
-        Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+        final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+        final var clone = OBJECT.cloneAsVersion(cloneVersion);
+        assertEquals(cloneVersion, clone.getVersion());
+        assertEquals(OBJECT.getPath(), clone.getPath());
+        assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ExistsTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((ExistsTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getPath(), ((ExistsTransactionRequest) deserialize).getPath());
+    protected void doAdditionalAssertions(final ExistsTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPath(), deserialize.getPath());
     }
 }
\ No newline at end of file
index e5a74a63854811c9a5db5db20c37bcf38342c720..e8ce28dedb69441f9a53710005c5bcfcddb13348 100644 (file)
@@ -7,43 +7,45 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ExistsTransactionSuccessTest extends AbstractTransactionSuccessTest<ExistsTransactionSuccess> {
     private static final boolean EXISTS = true;
 
-    private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, EXISTS);
+    private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        EXISTS);
 
-    @Override
-    protected ExistsTransactionSuccess object() {
-        return OBJECT;
+    public ExistsTransactionSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void getExistsTest() {
-        final boolean result = OBJECT.getExists();
-        Assert.assertEquals(EXISTS, result);
+        assertEquals(EXISTS, OBJECT.getExists());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ExistsTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getExists(), clone.getExists());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("exists=" + EXISTS));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("exists=" + EXISTS));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ExistsTransactionSuccess);
-        Assert.assertEquals(OBJECT.getExists(), ((ExistsTransactionSuccess) deserialize).getExists());
+    protected void doAdditionalAssertions(final ExistsTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getExists(), deserialize.getExists());
     }
 }
\ No newline at end of file
index c9d76f9b3d4591947978cf2f27e4bd8361285f98..13b9d6e8a19f0957bb875327e966e55c4908dad8 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class LocalHistoryFailureTest extends AbstractRequestFailureTest<LocalHistoryFailure> {
     private static final LocalHistoryFailure OBJECT = new LocalHistoryFailure(HISTORY_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    LocalHistoryFailure object() {
-        return OBJECT;
+    public LocalHistoryFailureTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final LocalHistoryFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
index 875037bb000b9dc6268a16742aab263681c76938..8afca0072c75c7451d7a982202b24e3316252ea5 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
@@ -16,21 +15,15 @@ import org.opendaylight.controller.cluster.access.ABIVersion;
 public class LocalHistorySuccessTest extends AbstractRequestSuccessTest<LocalHistorySuccess> {
     private static final LocalHistorySuccess OBJECT = new LocalHistorySuccess(HISTORY_IDENTIFIER, 0);
 
-    @Override
-    protected LocalHistorySuccess object() {
-        return OBJECT;
+    public LocalHistorySuccessTest() {
+        super(OBJECT, 96);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final LocalHistorySuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(ABIVersion.BORON, clone.getVersion());
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
         assertEquals(OBJECT.getSequence(), clone.getSequence());
         assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
-
-    @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertTrue(deserialize instanceof LocalHistorySuccess);
-    }
 }
index 5f6f6454cece22aee1f4e550cb63956d9b1b3f5d..e424f37cf69061e26c85d1a6fa1bba067316f7b9 100644 (file)
@@ -24,11 +24,11 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ModifyTransactionRequestBuilderTest {
-
     private final MemberName memberName = MemberName.forName("member-1");
     private final FrontendType frontendType = FrontendType.forName("test");
     private final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, frontendType);
@@ -36,10 +36,11 @@ public class ModifyTransactionRequestBuilderTest {
     private final TransactionIdentifier transactionIdentifier =
             new TransactionIdentifier(new LocalHistoryIdentifier(clientId, 0L), 0L);
     private final ActorRef actorRef = ActorSystem.create("test").actorOf(Props.create(TestActors.EchoActor.class));
-    private final NormalizedNode node = Builders.containerBuilder().withNodeIdentifier(
-            YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private final NormalizedNode node = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
     private final TransactionModification transactionModification =
-            new TransactionWrite(YangInstanceIdentifier.empty(), node);
+            new TransactionWrite(YangInstanceIdentifier.of(), node);
     private final ModifyTransactionRequestBuilder modifyTransactionRequestBuilder =
             new ModifyTransactionRequestBuilder(transactionIdentifier, actorRef);
 
@@ -52,38 +53,37 @@ public class ModifyTransactionRequestBuilderTest {
 
     @Test
     public void testGetIdentifier() {
-        final TransactionIdentifier identifier = modifyTransactionRequestBuilder.getIdentifier();
+        final var identifier = modifyTransactionRequestBuilder.getIdentifier();
         assertEquals(transactionIdentifier, identifier);
     }
 
     @Test
     public void testBuildReady() {
         modifyTransactionRequestBuilder.setReady();
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
         assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0));
     }
 
     @Test
     public void testBuildAbort() {
         modifyTransactionRequestBuilder.setAbort();
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
         assertTrue(modifyTransactionRequest.getModifications().isEmpty());
     }
 
     @Test
     public void testBuildCommitTrue() {
         modifyTransactionRequestBuilder.setCommit(true);
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
     }
 
     @Test
     public void testBuildCommitFalse() {
         modifyTransactionRequestBuilder.setCommit(false);
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
     }
-
 }
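// Minimal sketch of the node-construction change applied across these tests: the
// yangtools Builders.containerBuilder() fixture is replaced by ImmutableNodes, and the
// write targets YangInstanceIdentifier.of() where the tests previously used empty().
// The QName values are the ones used by ModifyTransactionRequestBuilderTest; the class
// name NodeFixtureSketch is hypothetical and only re-states the fixture outside the diff.
import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

final class NodeFixtureSketch {
    private NodeFixtureSketch() {
        // static fixture holder only
    }

    static TransactionWrite rootWrite() {
        // Empty container node named "localName" in "namespace", built with the new API.
        final ContainerNode node = ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
            .build();
        // Write that node at the root path, as the builder test's fixture does.
        return new TransactionWrite(YangInstanceIdentifier.of(), node);
    }
}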
index 3f284e8fc4069ab7911d0368fa01d866528572f6..45ee0bd2584b5f5096d55bdab40410e26f94ceb6 100644 (file)
@@ -7,59 +7,55 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ModifyTransactionRequestEmptyTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
     private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
+    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, List.of(), PROTOCOL);
 
-    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, new ArrayList<>(), PROTOCOL);
-
-    @Override
-    protected ModifyTransactionRequest object() {
-        return OBJECT;
+    public ModifyTransactionRequestEmptyTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void getPersistenceProtocolTest() {
-        final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
-        Assert.assertTrue(result.isPresent());
-        Assert.assertEquals(PROTOCOL, result.get());
+        assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
     }
 
     @Test
     public void getModificationsTest() {
-        final List<TransactionModification> result = OBJECT.getModifications();
-        Assert.assertNotNull(result);
-        Assert.assertTrue(result.isEmpty());
+        assertEquals(List.of(), OBJECT.getModifications());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("modifications=0"));
-        Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("modifications=0"));
+        assertThat(result, containsString("protocol=" + PROTOCOL));
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+        assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ModifyTransactionRequest);
-        final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
-        Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
-        Assert.assertEquals(OBJECT.getModifications(), casted.getModifications());
-        Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
+    protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getModifications(), deserialize.getModifications());
+        assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
     }
 }
\ No newline at end of file
index 7d0164f8674f0fce70e258b6dcb0019862dfd7f5..e3039d319ef219b919f9d5bd6dc42466dc217a33 100644 (file)
@@ -7,80 +7,75 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 import static org.opendaylight.controller.cluster.access.commands.TransactionModification.TYPE_WRITE;
 
 import com.google.common.base.MoreObjects;
-import com.google.common.collect.Lists;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ModifyTransactionRequestTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
-    private static final ContainerNode NODE = Builders.containerBuilder().withNodeIdentifier(
-            NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
 
-    private static final List<TransactionModification> MODIFICATIONS = Lists.newArrayList(
-            new TransactionWrite(YangInstanceIdentifier.empty(), NODE));
+    private static final List<TransactionModification> MODIFICATIONS = List.of(
+            new TransactionWrite(YangInstanceIdentifier.of(), NODE));
 
     private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
 
-    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, MODIFICATIONS, PROTOCOL);
+    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, MODIFICATIONS, PROTOCOL);
 
-    @Override
-    protected ModifyTransactionRequest object() {
-        return OBJECT;
+    public ModifyTransactionRequestTest() {
+        super(OBJECT, 140);
     }
 
     @Test
     public void getPersistenceProtocolTest() {
-        final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
-        assertTrue(result.isPresent());
-        assertEquals(PROTOCOL, result.get());
+        assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
     }
 
     @Test
     public void getModificationsTest() {
-        final List<TransactionModification> result = OBJECT.getModifications();
-        assertNotNull(result);
-        assertEquals(MODIFICATIONS, result);
+        assertEquals(MODIFICATIONS, OBJECT.getModifications());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        assertTrue(result.toString().contains("modifications=1"));
-        assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("modifications=1"));
+        assertThat(result, containsString("protocol=" + PROTOCOL));
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+        assertEquals(OBJECT.getModifications(), clone.getModifications());
+        assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertTrue(deserialize instanceof ModifyTransactionRequest);
-        final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
-        assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
-        assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
-
-        assertNotNull(casted.getModifications());
-        assertEquals(1, casted.getModifications().size());
-        final TransactionModification modification = casted.getModifications().get(0);
-        assertEquals(YangInstanceIdentifier.empty(), modification.getPath());
+    protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
+        assertNotNull(deserialize.getModifications());
+        assertEquals(1, deserialize.getModifications().size());
+        final var modification = deserialize.getModifications().get(0);
+        assertEquals(YangInstanceIdentifier.of(), modification.getPath());
         assertEquals(TYPE_WRITE, modification.getType());
     }
 }
index 3cd462693381f0cf9bd1384eb9aa5c78f4ba280f..8fb470d765e6fbe47b2a4c9c53f8dc82240265e5 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
@@ -16,21 +15,15 @@ import org.opendaylight.controller.cluster.access.ABIVersion;
 public class ModifyTransactionSuccessTest extends AbstractTransactionSuccessTest<ModifyTransactionSuccess> {
     private static final ModifyTransactionSuccess OBJECT = new ModifyTransactionSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected ModifyTransactionSuccess object() {
-        return OBJECT;
+    public ModifyTransactionSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(ABIVersion.BORON, clone.getVersion());
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
         assertEquals(OBJECT.getSequence(), clone.getSequence());
         assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
-
-    @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertTrue(deserialize instanceof ModifyTransactionSuccess);
-    }
 }
\ No newline at end of file
index c4096833ca282f88c19b97791c8bb3986f1cb9b7..10f4f0eb7834208e21c51d344b73c5db06cb6a95 100644 (file)
@@ -7,32 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ReadTransactionRequestTest extends AbstractReadTransactionRequestTest<ReadTransactionRequest> {
-    private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+    private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, PATH, SNAPSHOT_ONLY);
 
-    @Override
-    protected ReadTransactionRequest object() {
-        return OBJECT;
+    public ReadTransactionRequestTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
-        final ReadTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
-        Assert.assertEquals(cloneVersion, clone.getVersion());
-        Assert.assertEquals(OBJECT.getPath(), clone.getPath());
-        Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+        final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+        final var clone = OBJECT.cloneAsVersion(cloneVersion);
+        assertEquals(cloneVersion, clone.getVersion());
+        assertEquals(OBJECT.getPath(), clone.getPath());
+        assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ReadTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((ReadTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getPath(), ((ReadTransactionRequest) deserialize).getPath());
+    protected void doAdditionalAssertions(final ReadTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPath(), deserialize.getPath());
     }
 }
\ No newline at end of file
index 51257462b636c0aaf5397104d79dd27380d452cf..055b6f5c88e94522a923621fef0398d0f7958190 100644 (file)
@@ -8,38 +8,34 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 
 import java.util.Optional;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 public class ReadTransactionSuccessNoDataTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
-    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, Optional.empty());
+    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        Optional.empty());
 
-    @Override
-    protected ReadTransactionSuccess object() {
-        return OBJECT;
+    public ReadTransactionSuccessNoDataTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void getDataTest() {
-        final Optional<NormalizedNode> result = OBJECT.getData();
-        assertFalse(result.isPresent());
+        assertEquals(Optional.empty(), OBJECT.getData());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getData(), clone.getData());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertTrue(deserialize instanceof ReadTransactionSuccess);
-        assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+    protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getData(), deserialize.getData());
     }
 }
index 4bda8c8016da4243e8d6f9a7c99d0cbc21f40817..4557897ae5a52b5c69a19f4be451bca7d1466b25 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import java.util.Optional;
 import org.junit.Test;
@@ -16,37 +15,35 @@ import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ReadTransactionSuccessTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
-    private static final ContainerNode NODE = Builders.containerBuilder().withNodeIdentifier(
-            NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
 
-    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, Optional.of(NODE));
+    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        Optional.of(NODE));
 
-    @Override
-    protected ReadTransactionSuccess object() {
-        return OBJECT;
+    public ReadTransactionSuccessTest() {
+        super(OBJECT, 129);
     }
 
     @Test
     public void getDataTest() {
-        final Optional<NormalizedNode> result = OBJECT.getData();
-        assertTrue(result.isPresent());
-        assertEquals(NODE.body(), result.get().body());
+        assertEquals(Optional.of(NODE), OBJECT.getData());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getData(), clone.getData());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertTrue(deserialize instanceof ReadTransactionSuccess);
-        assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+    protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getData(), deserialize.getData());
     }
 }
index 39076e30072b1cb95e175ce51ac41fe66b6e6ac7..27b30d9e8b67290539fe2f5d99d871dbfe6b6b9c 100644 (file)
@@ -7,8 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 
 import com.google.common.primitives.UnsignedLong;
@@ -17,23 +15,23 @@ import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class SkipTransactionsRequestTest extends AbstractTransactionRequestTest<SkipTransactionsRequest> {
-    private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, List.of(UnsignedLong.ONE));
+    private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, List.of(UnsignedLong.ONE));
 
-    @Override
-    protected SkipTransactionsRequest object() {
-        return OBJECT;
+    public SkipTransactionsRequestTest() {
+        super(OBJECT, 109);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final SkipTransactionsRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertThat(deserialize, instanceOf(SkipTransactionsRequest.class));
-        assertEquals(OBJECT.getReplyTo(), ((SkipTransactionsRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final SkipTransactionsRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index be70ad96ea8e9353d67c4a5bf208fa41be416909..3ff798d23120230e4e3ce3e6ac4e124a655d6e13 100644 (file)
@@ -7,30 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class SkipTransactionsResponseTest extends AbstractTransactionSuccessTest<SkipTransactionsResponse> {
-    private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected SkipTransactionsResponse object() {
-        return OBJECT;
+    public SkipTransactionsResponseTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final SkipTransactionsResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        assertThat(deserialize, instanceOf(SkipTransactionsResponse.class));
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index 9c7dbf11d729bdc97eb05f64a23258e8b3bf1c9b..c0e1ae8e1f221c563fa7292ea6c6a3cc381acfb3 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionAbortRequestTest extends AbstractTransactionRequestTest<TransactionAbortRequest> {
-    private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionAbortRequest object() {
-        return OBJECT;
+    public TransactionAbortRequestTest() {
+        super(OBJECT, 101);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionAbortRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionAbortRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionAbortRequest)deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionAbortRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 31959aaf3e18022be887cfb9f645ad1d6f730b25..1ceab66a9557810ac0c460da5ad8a0f23ebd53f6 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionAbortSuccessTest extends AbstractTransactionSuccessTest<TransactionAbortSuccess> {
-    private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionAbortSuccess object() {
-        return OBJECT;
+    public TransactionAbortSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionAbortSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionAbortSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index e8995a9e24737a3f35a9f99407725d40122e2048..ee84907d5413d246e2ecca90f99e138aabdb8a88 100644 (file)
@@ -7,27 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionCanCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCanCommitSuccess> {
-    private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(TRANSACTION_IDENTIFIER,
+        0);
 
-    @Override
-    protected TransactionCanCommitSuccess object() {
-        return OBJECT;
+    public TransactionCanCommitSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionCanCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionCanCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index 9db578d2b6d82e2f444c44da9812716e57196ff2..ca1f8f8dd920d13ef7db101ea78ef1e4415f052a 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCommitSuccess> {
-    private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionCommitSuccess object() {
-        return OBJECT;
+    public TransactionCommitSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
-}
\ No newline at end of file
+}
index 26f1a379bbef7cf1e89f38e321fee0831f606a6f..a5b3401a7f92aabacecdee947646f46ee683e26e 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionDoCommitRequestTest extends AbstractTransactionRequestTest<TransactionDoCommitRequest> {
-    private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionDoCommitRequest object() {
-        return OBJECT;
+    public TransactionDoCommitRequestTest() {
+        super(OBJECT, 102);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionDoCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionDoCommitRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionDoCommitRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionDoCommitRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 9e8467e5ca3425d381d0d03777f3c24a97316bf1..7e027ea2d396e8f8525e7e2aac95b8e951e9cee6 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionFailureTest extends AbstractRequestFailureTest<TransactionFailure> {
     private static final TransactionFailure OBJECT = new TransactionFailure(TRANSACTION_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    TransactionFailure object() {
-        return OBJECT;
+    public TransactionFailureTest() {
+        super(OBJECT, 100);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
index ceac8606b87b55ed9063f08e350e35aef68ad612..21605372c2d2074ac94c2e9c86cda77ebe9539dd 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPreCommitRequestTest extends AbstractTransactionRequestTest<TransactionPreCommitRequest> {
-    private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionPreCommitRequest object() {
-        return OBJECT;
+    public TransactionPreCommitRequestTest() {
+        super(OBJECT, 102);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPreCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPreCommitRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPreCommitRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionPreCommitRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 0130ea06ab8c515297854604452c64d73f29016b..5f8f29f45078da8c01c3265ade5c2cd52a4db1d8 100644 (file)
@@ -7,27 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPreCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionPreCommitSuccess> {
-    private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(TRANSACTION_IDENTIFIER,
+        0);
 
-    @Override
-    protected TransactionPreCommitSuccess object() {
-        return OBJECT;
+    public TransactionPreCommitSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPreCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPreCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index 5ae9f26dbf5605aa6239a8eea8099979086378f5..7453f4461a5f87146c45f398e113142db9c56f4e 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPurgeRequestTest extends AbstractTransactionRequestTest<TransactionPurgeRequest> {
-    private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionPurgeRequest object() {
-        return OBJECT;
+    public TransactionPurgeRequestTest() {
+        super(OBJECT, 101);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPurgeRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPurgeRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPurgeRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionPurgeRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index bef9ae140bde60e9d8b13f5704e6b6964da743cf..e8b4294d5e828e3560a62af0603d3d22a10ffcd0 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPurgeResponseTest extends AbstractTransactionSuccessTest<TransactionPurgeResponse> {
-    private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionPurgeResponse object() {
-        return OBJECT;
+    public TransactionPurgeResponseTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPurgeResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPurgeResponse);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index f0f5d3d26c56a7c557bd74e21dd294b620feaada..fc34fc39787e8b1d4e21e8c4840616960ca23a20 100644 (file)
@@ -7,12 +7,18 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public abstract class AbstractEnvelopeTest<E extends Envelope<?>> {
+    protected record EnvelopeDetails<E extends Envelope<?>>(E envelope, int expectedSize) {
+        // Nothing else
+    }
+
     private static final FrontendIdentifier FRONTEND =
             new FrontendIdentifier(MemberName.forName("test"), FrontendIdentifierTest.ONE_FRONTEND_TYPE);
     private static final ClientIdentifier CLIENT = new ClientIdentifier(FRONTEND, 0);
@@ -20,33 +26,37 @@ public abstract class AbstractEnvelopeTest<E extends Envelope<?>> {
     protected static final TransactionIdentifier OBJECT = new TransactionIdentifier(HISTORY, 0);
 
     private E envelope;
+    private int expectedSize;
 
     @Before
     public void setUp() throws Exception {
-        envelope = createEnvelope();
+        final var details = createEnvelope();
+        envelope = requireNonNull(details.envelope);
+        expectedSize = details.expectedSize;
     }
 
     @Test
     public void testProxySerializationDeserialization() {
         final byte[] serializedBytes = SerializationUtils.serialize(envelope);
-        final Object deserialize = SerializationUtils.deserialize(serializedBytes);
-        checkDeserialized((E) deserialize);
+        assertEquals(expectedSize, serializedBytes.length);
+        @SuppressWarnings("unchecked")
+        final E deserialize = (E) SerializationUtils.deserialize(serializedBytes);
+        checkDeserialized(deserialize);
     }
 
     private void checkDeserialized(final E deserializedEnvelope) {
-        Assert.assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
-        Assert.assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
-        final Message<?, ?> expectedMessage = envelope.getMessage();
-        final Message<?, ?> actualMessage = deserializedEnvelope.getMessage();
-        Assert.assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
-        Assert.assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
-        Assert.assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
-        Assert.assertEquals(expectedMessage.getClass(), actualMessage.getClass());
+        assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
+        assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
+        final var expectedMessage = envelope.getMessage();
+        final var actualMessage = deserializedEnvelope.getMessage();
+        assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
+        assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
+        assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
+        assertEquals(expectedMessage.getClass(), actualMessage.getClass());
         doAdditionalAssertions(envelope, deserializedEnvelope);
     }
 
-    protected abstract E createEnvelope();
+    protected abstract EnvelopeDetails<E> createEnvelope();
 
-    @SuppressWarnings("checkstyle:hiddenField")
     protected abstract void doAdditionalAssertions(E envelope, E resolvedObject);
 }
index 8fcc9fa1f5ffe1f7eabf3326f65982ac56ed6b64..74cd4cf3ba182d2b3ee3c805dba0b33991589f25 100644 (file)
@@ -42,7 +42,6 @@ public abstract class AbstractIdentifierTest<T extends Identifier> {
         assertEquals(object().hashCode(), equalObject().hashCode());
     }
 
-
     @Test
     public final void testSerialization() throws Exception {
         assertTrue(object().equals(copy(object())));
index c65c1956c2a9ee1c12dae42760cbc41db6223665..48ceabef81cd87259df989c49ac9cf917350c6cb 100644 (file)
@@ -7,22 +7,37 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import com.google.common.base.MoreObjects;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public abstract class AbstractRequestTest<T extends Request<?, T>> {
     private static final ActorSystem SYSTEM = ActorSystem.create("test");
     protected static final ActorRef ACTOR_REF = TestProbe.apply(SYSTEM).ref();
+    private static final int ACTOR_REF_SIZE = ACTOR_REF.path().toSerializationFormat().length();
+
+    private final T object;
+    private final int expectedSize;
 
-    protected abstract T object();
+    protected AbstractRequestTest(final T object, final int baseSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = baseSize + ACTOR_REF_SIZE;
+    }
+
+    protected final T object() {
+        return object;
+    }
 
     @Before
     public void setUp() {
@@ -31,25 +46,27 @@ public abstract class AbstractRequestTest<T extends Request<?, T>> {
 
     @Test
     public void getReplyToTest() {
-        Assert.assertEquals(ACTOR_REF, object().getReplyTo());
+        assertEquals(ACTOR_REF, object.getReplyTo());
     }
 
     @Test
     public void addToStringAttributesCommonTest() {
-        final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
-        Assert.assertTrue(result.toString().contains("replyTo=" + ACTOR_REF));
+        final var result = object.addToStringAttributes(MoreObjects.toStringHelper(object));
+        assertThat(result.toString(), containsString("replyTo=" + ACTOR_REF));
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final byte[] bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+        @SuppressWarnings("unchecked")
+        final T deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
         doAdditionalAssertions(deserialize);
     }
 
-    protected abstract void doAdditionalAssertions(Object deserialize);
+    protected abstract void doAdditionalAssertions(T deserialize);
 }
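
A note on the constructor change above: the expected size is split into a fixed base plus ACTOR_REF_SIZE, presumably because the serialized form embeds the actor path of TestProbe.apply(SYSTEM).ref(), whose textual length depends on the runtime actor system address and can only be measured when the test runs; the per-test base constants (98-102 in the tests above) then cover only the stable remainder of the payload.
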
index 0908659487cec6282eab05fcfd217a5bbd0813f2..d9bd5c126b87704243fb56282f8f84e0a9e1b7cf 100644 (file)
@@ -32,6 +32,6 @@ public class ClientIdentifierTest extends AbstractIdentifierTest<ClientIdentifie
 
     @Override
     int expectedSize() {
-        return 114;
+        return 94;
     }
 }
index 733c3c7525041ee316e74264172df73976b39d4c..70132a6c05d0d2bb4da906d3358d5cae6512701b 100644 (file)
@@ -7,32 +7,39 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 
 import java.io.DataInput;
 import java.io.IOException;
+import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
-
     @Override
-    protected FailureEnvelope createEnvelope() {
-        final RequestFailure<?, ?> message =
-                new MockFailure(OBJECT, new RuntimeRequestException("msg", new RuntimeException()), 42);
-        return new FailureEnvelope(message, 1L, 2L, 11L);
+    protected EnvelopeDetails<FailureEnvelope> createEnvelope() {
+        final var cause = new RuntimeRequestException("msg", new RuntimeException());
+        final int causeSize = SerializationUtils.serialize(cause).length;
+        return new EnvelopeDetails<>(new FailureEnvelope(new MockFailure(OBJECT, cause, 42), 1L, 2L, 11L),
+            causeSize + 216);
     }
 
     @Override
     protected void doAdditionalAssertions(final FailureEnvelope envelope, final FailureEnvelope resolvedObject) {
         assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
-        final RequestException expectedCause = envelope.getMessage().getCause();
-        final RequestException actualCause = resolvedObject.getMessage().getCause();
+        final var expectedCause = envelope.getMessage().getCause();
+        final var actualCause = resolvedObject.getMessage().getCause();
         assertEquals(expectedCause.getMessage(), actualCause.getMessage());
         assertEquals(expectedCause.isRetriable(), actualCause.isRetriable());
     }
 
-    private static class MockRequestFailureProxy extends AbstractRequestFailureProxy<WritableIdentifier, MockFailure> {
+    private static class MockRequestFailureProxy implements RequestFailure.SerialForm<WritableIdentifier, MockFailure> {
+        @java.io.Serial
+        private static final long serialVersionUID = 5015515628523887221L;
+
+        private MockFailure message;
 
         @SuppressWarnings("checkstyle:RedundantModifier")
         public MockRequestFailureProxy() {
@@ -40,23 +47,38 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
         }
 
         private MockRequestFailureProxy(final MockFailure mockFailure) {
-            super(mockFailure);
+            message = requireNonNull(mockFailure);
         }
 
         @Override
-        protected MockFailure createFailure(final WritableIdentifier target, final long sequence,
-                                            final RequestException failureCause) {
+        public MockFailure createFailure(final WritableIdentifier target, final long sequence,
+                final RequestException failureCause) {
             return new MockFailure(target, failureCause, sequence);
         }
 
         @Override
-        protected WritableIdentifier readTarget(final DataInput in) throws IOException {
+        public WritableIdentifier readTarget(final DataInput in) throws IOException {
             return TransactionIdentifier.readFrom(in);
         }
 
+        @Override
+        public MockFailure message() {
+            return verifyNotNull(message);
+        }
+
+        @Override
+        public void setMessage(final MockFailure message) {
+            this.message = requireNonNull(message);
+        }
+
+        @Override
+        public Object readResolve() {
+            return message();
+        }
     }
 
     private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         MockFailure(final WritableIdentifier target, final RequestException cause, final long sequence) {
@@ -64,7 +86,7 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
+        protected RequestFailure.SerialForm<WritableIdentifier, MockFailure> externalizableProxy(
                 final ABIVersion version) {
             return new MockRequestFailureProxy(this);
         }
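
The MockRequestFailureProxy change above tracks the move from the AbstractRequestFailureProxy base class to the RequestFailure.SerialForm interface, i.e. the classic Java serialization-proxy idiom in which readResolve() swaps the proxy back for the real message on deserialization. A self-contained sketch of that general idiom (hypothetical Token class, not part of this patch):

    import java.io.Serializable;

    public final class Token implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String name;

        public Token(final String name) {
            this.name = name;
        }

        // Serialize via a proxy instead of the class itself.
        private Object writeReplace() {
            return new SerialForm(name);
        }

        private static final class SerialForm implements Serializable {
            private static final long serialVersionUID = 1L;

            private final String name;

            SerialForm(final String name) {
                this.name = name;
            }

            // On deserialization the proxy resolves back to the real object.
            private Object readResolve() {
                return new Token(name);
            }
        }
    }
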
index cc7124483b4e2de4683077df62e44321af6b1ca7..203ffd5ab90a8c4b59589914eed8bbec180d051f 100644 (file)
@@ -33,6 +33,6 @@ public class FrontendIdentifierTest extends AbstractIdentifierTest<FrontendIdent
 
     @Override
     int expectedSize() {
-        return 115;
+        return 93;
     }
 }
index 0cfd887d565dff9bc32f7aa136a795180b9d6499..904a27f2e51721f5f0af0e5ed520ba06c0ee46e3 100644 (file)
@@ -35,7 +35,7 @@ public class FrontendTypeTest extends AbstractIdentifierTest<FrontendType> {
 
     @Override
     int expectedSize() {
-        return 104;
+        return 88;
     }
 
     @Test
index 136aa5a0f31245aaffea0ae892ea798537e95fce..469916c68acc019de1606e7ca2d9883a98169400 100644 (file)
@@ -34,7 +34,7 @@ public class MemberNameTest extends AbstractIdentifierTest<MemberName> {
 
     @Override
     int expectedSize() {
-        return 101;
+        return 87;
     }
 
     @Test
index b63dc4c78c741d514a097d2c7bf01b2eb7000166..30366c99f1a52a9e1167eb24e93e68e9de58c796 100644 (file)
@@ -7,19 +7,21 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
 
 public class RequestEnvelopeTest extends AbstractEnvelopeTest<RequestEnvelope> {
-
     private ActorSystem system;
     private ActorRef replyTo;
     private TestProbe replyToProbe;
@@ -33,27 +35,29 @@ public class RequestEnvelopeTest extends AbstractEnvelopeTest<RequestEnvelope> {
     }
 
     @Override
-    protected RequestEnvelope createEnvelope() {
+    protected EnvelopeDetails<RequestEnvelope> createEnvelope() {
         replyToProbe = new TestProbe(system);
         replyTo = replyToProbe.ref();
-        final TransactionPurgeRequest message = new TransactionPurgeRequest(OBJECT, 2L, replyTo);
-        return new RequestEnvelope(message, 1L, 2L);
+        final int refSize = replyTo.path().toSerializationFormat().length();
+
+        return new EnvelopeDetails<>(new RequestEnvelope(new TransactionPurgeRequest(OBJECT, 2L, replyTo), 1L, 2L),
+            refSize + 179);
     }
 
     @Override
     protected void doAdditionalAssertions(final RequestEnvelope envelope, final RequestEnvelope resolvedObject) {
         final Request<?, ?> actual = resolvedObject.getMessage();
-        Assert.assertTrue(actual instanceof TransactionPurgeRequest);
-        final TransactionPurgeRequest purgeRequest = (TransactionPurgeRequest) actual;
-        Assert.assertEquals(replyTo, purgeRequest.getReplyTo());
-        final TransactionPurgeResponse response = new TransactionPurgeResponse(OBJECT, 2L);
+        assertThat(actual, instanceOf(TransactionPurgeRequest.class));
+        final var purgeRequest = (TransactionPurgeRequest) actual;
+        assertEquals(replyTo, purgeRequest.getReplyTo());
+        final var response = new TransactionPurgeResponse(OBJECT, 2L);
         resolvedObject.sendSuccess(response, 11L);
-        final SuccessEnvelope successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
-        Assert.assertEquals(response, successEnvelope.getMessage());
-        final RuntimeRequestException failResponse = new RuntimeRequestException("fail", new RuntimeException());
+        final var successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
+        assertEquals(response, successEnvelope.getMessage());
+        final var failResponse = new RuntimeRequestException("fail", new RuntimeException());
         resolvedObject.sendFailure(failResponse, 11L);
-        final FailureEnvelope failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
-        Assert.assertEquals(failResponse, failureEnvelope.getMessage().getCause());
+        final var failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
+        assertEquals(failResponse, failureEnvelope.getMessage().getCause());
     }
 
     @After
index 30d9e98636e50e036fccdc1791d923ea72d83d6d..9d1aa40b61f151cf38082e89a908692e995faed2 100644 (file)
@@ -7,20 +7,18 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
 
 public class SuccessEnvelopeTest extends AbstractEnvelopeTest<SuccessEnvelope> {
-
     @Override
-    protected SuccessEnvelope createEnvelope() {
-        final RequestSuccess<?, ?> message = new TransactionAbortSuccess(OBJECT, 2L);
-        return new SuccessEnvelope(message, 1L, 2L, 11L);
+    protected EnvelopeDetails<SuccessEnvelope> createEnvelope() {
+        return new EnvelopeDetails<>(new SuccessEnvelope(new TransactionAbortSuccess(OBJECT, 2L), 1L, 2L, 11L), 180);
     }
 
     @Override
-    protected void doAdditionalAssertions(final SuccessEnvelope envelope,
-                                          final SuccessEnvelope resolvedObject) {
-        Assert.assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
+    protected void doAdditionalAssertions(final SuccessEnvelope envelope, final SuccessEnvelope resolvedObject) {
+        assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
     }
 }
\ No newline at end of file
index c59fa9e8381cfb0867bd49ffcee509a9cf0e75b6..78a4e73946a2b6d85f8b39940a1b8ccb5844d9a5 100644 (file)
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
     <packaging>bundle</packaging>
 
     <dependencies>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+           <groupId>com.google.guava</groupId>
+           <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>cds-access-api</artifactId>
         </dependency>
-
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-api</artifactId>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
         </dependency>
         <dependency>
-          <groupId>org.opendaylight.controller</groupId>
-          <artifactId>sal-clustering-commons</artifactId>
+            <groupId>org.checkerframework</groupId>
+            <artifactId>checker-qual</artifactId>
+            <optional>true</optional>
         </dependency>
 
         <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
         <dependency>
-           <groupId>com.google.guava</groupId>
-           <artifactId>guava-testlib</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava-testlib</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-api</artifactId>
+            <scope>test</scope>
         </dependency>
         <dependency>
-          <groupId>org.opendaylight.controller</groupId>
-          <artifactId>sal-clustering-commons</artifactId>
-          <type>test-jar</type>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-impl</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
+            <type>test-jar</type>
         </dependency>
     </dependencies>
 
     <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <artifactId>maven-javadoc-plugin</artifactId>
+                    <version>3.1.1</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
         <plugins>
             <plugin>
                 <groupId>org.apache.felix</groupId>
index a1c84c0e83cf5d947f3cbc49b2f4590ce03a13b6..98edb1d3419dbde6e97352c964f143f10558bfcb 100644 (file)
@@ -10,17 +10,13 @@ package org.opendaylight.controller.cluster.access.client;
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.persistence.AbstractPersistentActor;
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Frontend actor which takes care of persisting generations and creates an appropriate ClientIdentifier.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class AbstractClientActor extends AbstractPersistentActor {
     private static final Logger LOG = LoggerFactory.getLogger(AbstractClientActor.class);
     private AbstractClientActorBehavior<?> currentBehavior;
index 4188a41fd5720894ffefc2351c2dd803d315352b..39ae396cff714c088c7a8405ee64480a4f9124b5 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.cluster.access.client;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 
@@ -18,10 +17,7 @@ import org.eclipse.jdt.annotation.Nullable;
  * Base behavior attached to {@link AbstractClientActor}.
  *
  * @param <C> Type of associated context
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class AbstractClientActorBehavior<C extends AbstractClientActorContext> implements AutoCloseable {
     private final @NonNull C context;
 
@@ -60,6 +56,7 @@ public abstract class AbstractClientActorBehavior<C extends AbstractClientActorC
 
     @Override
     public void close() {
+        // No-op
     }
 
     /**
index 149f38f9b6cb5ad455c698b00da81ada8ebf0c67..f34760ec03c0057f2393d4a9504b6399de68ecb9 100644 (file)
@@ -76,11 +76,11 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(MAX_DELAY_SECONDS);
 
     private final Lock lock = new ReentrantLock();
-    private final ClientActorContext context;
-    @GuardedBy("lock")
-    private final TransmitQueue queue;
+    private final @NonNull ClientActorContext context;
     private final @NonNull Long cookie;
     private final String backendName;
+    @GuardedBy("lock")
+    private final TransmitQueue queue;
 
     @GuardedBy("lock")
     private boolean haveTimer;
@@ -95,12 +95,12 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     // Private constructor to avoid code duplication.
     private AbstractClientConnection(final AbstractClientConnection<T> oldConn, final TransmitQueue newQueue,
             final String backendName) {
-        this.context = requireNonNull(oldConn.context);
-        this.cookie = requireNonNull(oldConn.cookie);
+        context = oldConn.context;
+        cookie = oldConn.cookie;
         this.backendName = requireNonNull(backendName);
-        this.queue = requireNonNull(newQueue);
+        queue = requireNonNull(newQueue);
         // Will be updated in finishReplay if needed.
-        this.lastReceivedTicks = oldConn.lastReceivedTicks;
+        lastReceivedTicks = oldConn.lastReceivedTicks;
     }
 
     // This constructor is only to be called by ConnectingClientConnection constructor.
@@ -110,8 +110,8 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         this.context = requireNonNull(context);
         this.cookie = requireNonNull(cookie);
         this.backendName = requireNonNull(backendName);
-        this.queue = new TransmitQueue.Halted(queueDepth);
-        this.lastReceivedTicks = currentTime();
+        queue = new TransmitQueue.Halted(queueDepth);
+        lastReceivedTicks = currentTime();
     }
 
     // This constructor is only to be called (indirectly) by ReconnectingClientConnection constructor.
@@ -128,7 +128,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             requireNonNull(oldConn.context).messageSlicer()), newBackend.getName());
     }
 
-    public final ClientActorContext context() {
+    public final @NonNull ClientActorContext context() {
         return context;
     }
 
@@ -136,7 +136,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         return cookie;
     }
 
-    public final ActorRef localActor() {
+    public final @NonNull ActorRef localActor() {
         return context.self();
     }
 
@@ -345,7 +345,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
 
                 if (delay.isPresent()) {
                     // If there is new delay, schedule a timer
-                    scheduleTimer(delay.getAsLong());
+                    scheduleTimer(delay.orElseThrow());
                 } else {
                     LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
                 }
@@ -489,7 +489,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         }
 
         if (maybeEntry.isPresent()) {
-            final TransmittedConnectionEntry entry = maybeEntry.get();
+            final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
             LOG.debug("Completing {} with {}", entry, envelope);
             entry.complete(envelope.getMessage());
         }
index 14ca1ef38c8f850071d3e1b04f520faa7a496b69..3f8c11a9137ed3bc5cccda43b8a37f55d6f46f3d 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.client;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.Stopwatch;
 import com.google.common.base.Verify;
 import java.util.Collection;
@@ -45,10 +44,7 @@ import scala.concurrent.duration.FiniteDuration;
 
 /**
  * A behavior, which handles messages sent to a {@link AbstractClientActor}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class ClientActorBehavior<T extends BackendInfo> extends
         RecoveredClientActorBehavior<ClientActorContext> implements Identifiable<ClientIdentifier> {
     /**
@@ -152,12 +148,11 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
             return ((InternalCommand<T>) command).execute(this);
         }
 
-        if (command instanceof SuccessEnvelope) {
-            return onRequestSuccess((SuccessEnvelope) command);
+        if (command instanceof SuccessEnvelope successEnvelope) {
+            return onRequestSuccess(successEnvelope);
         }
-
-        if (command instanceof FailureEnvelope) {
-            return internalOnRequestFailure((FailureEnvelope) command);
+        if (command instanceof FailureEnvelope failureEnvelope) {
+            return internalOnRequestFailure(failureEnvelope);
         }
 
         if (MessageAssembler.isHandledMessage(command)) {
@@ -174,10 +169,10 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
     }
 
     private static long extractCookie(final Identifier id) {
-        if (id instanceof TransactionIdentifier) {
-            return ((TransactionIdentifier) id).getHistoryId().getCookie();
-        } else if (id instanceof LocalHistoryIdentifier) {
-            return ((LocalHistoryIdentifier) id).getCookie();
+        if (id instanceof TransactionIdentifier transactionId) {
+            return transactionId.getHistoryId().getCookie();
+        } else if (id instanceof LocalHistoryIdentifier historyId) {
+            return historyId.getCookie();
         } else {
             throw new IllegalArgumentException("Unhandled identifier " + id);
         }
@@ -215,7 +210,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
              * sessionId and if it does not match our current connection just ignore it.
              */
             final Optional<T> optBackend = conn.getBackendInfo();
-            if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) {
+            if (optBackend.isPresent() && optBackend.orElseThrow().getSessionId() != command.getSessionId()) {
                 LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(),
                     conn, command);
                 return this;
@@ -327,8 +322,8 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
 
             LOG.error("{}: failed to resolve shard {}", persistenceId(), shard, failure);
             final RequestException cause;
-            if (failure instanceof RequestException) {
-                cause = (RequestException) failure;
+            if (failure instanceof RequestException requestException) {
+                cause = requestException;
             } else {
                 cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
             }
@@ -420,7 +415,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
 
         final Long shard = oldConn.cookie();
         LOG.info("{}: refreshing backend for shard {}", persistenceId(), shard);
-        resolver().refreshBackendInfo(shard, conn.getBackendInfo().get()).whenComplete(
+        resolver().refreshBackendInfo(shard, conn.getBackendInfo().orElseThrow()).whenComplete(
             (backend, failure) -> context().executeInActor(behavior -> {
                 backendConnectFinished(shard, conn, backend, failure);
                 return behavior;
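
The behaviour changes above are mostly mechanical Java 17 cleanups: instanceof pattern matching replaces cast-after-check, and Optional.get() gives way to orElseThrow(). A tiny self-contained sketch of both idioms (hypothetical PatternSketch class, illustrative only):

    import java.util.Optional;

    final class PatternSketch {
        private PatternSketch() {
            // utility class
        }

        static long cookieOf(final Object id) {
            // Pattern matching binds the cast result directly, removing the explicit cast.
            if (id instanceof Long cookie) {
                return cookie;
            }
            throw new IllegalArgumentException("Unhandled identifier " + id);
        }

        static String backendName(final Optional<String> backend) {
            // orElseThrow() states the failure mode explicitly where get() hides it.
            return backend.orElseThrow();
        }
    }
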
index 0864da10b06272c892f67d50aac9e8831e45b0d9..abebf02197965fc7b46ccca446009ec208e8b655 100644 (file)
@@ -13,7 +13,6 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Cancellable;
 import akka.actor.Scheduler;
-import com.google.common.annotations.Beta;
 import com.google.common.base.Ticker;
 import java.util.concurrent.TimeUnit;
 import org.eclipse.jdt.annotation.NonNull;
@@ -32,10 +31,7 @@ import scala.concurrent.duration.FiniteDuration;
  * Time-keeping in a client actor is based on monotonic time. The precision of this time can be expected to be the
  * same as {@link System#nanoTime()}, but it is not tied to that particular clock. Actor clock is exposed as
  * a {@link Ticker}, which can be obtained via {@link #ticker()}. This class is thread-safe.
- *
- * @author Robert Varga
  */
-@Beta
 public class ClientActorContext extends AbstractClientActorContext implements Identifiable<ClientIdentifier> {
     private final ExecutionContext executionContext;
     private final ClientIdentifier identifier;
@@ -49,9 +45,9 @@ public class ClientActorContext extends AbstractClientActorContext implements Id
             final ClientIdentifier identifier, final ClientActorConfig config) {
         super(self, persistenceId);
         this.identifier = requireNonNull(identifier);
-        this.scheduler = requireNonNull(system).scheduler();
-        this.executionContext = system.dispatcher();
-        this.dispatchers = new Dispatchers(system.dispatchers());
+        scheduler = requireNonNull(system).scheduler();
+        executionContext = system.dispatcher();
+        dispatchers = new Dispatchers(system.dispatchers());
         this.config = requireNonNull(config);
 
         messageSlicer = MessageSlicer.builder().messageSliceSize(config.getMaximumMessageSliceSize())
index 8a5af45d155162a1e03e94448bfd5a6964604de3..8bcce85dd3e85f088016154f74f68513827bc5b2 100644 (file)
@@ -7,12 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
-@Beta
+/**
+ * A connected connection.
+ *
+ * @param <T> Backend info type
+ */
 public final class ConnectedClientConnection<T extends BackendInfo> extends AbstractReceivingClientConnection<T> {
-
     ConnectedClientConnection(final AbstractClientConnection<T> oldConnection, final T newBackend) {
         super(oldConnection, newBackend);
     }
index 10159901064c6578f3c530ff00d16aa051c9bb53..445321b474cd725a03e3e41bf6dd688786e9de42 100644 (file)
@@ -7,11 +7,9 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
-@Beta
 public final class ConnectingClientConnection<T extends BackendInfo> extends AbstractClientConnection<T> {
     /**
      * A wild estimate on how deep a queue should be. Without having knowledge of the remote actor we can only
index b47ddee2a3c5707624ca78b11579a0a3e9894473..c5e47e76dd8e22b272af4c208250c96765cb83df 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.client;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import java.util.function.Consumer;
@@ -20,10 +19,7 @@ import org.opendaylight.yangtools.concepts.Immutable;
 /**
  * Single entry in a {@link AbstractClientConnection}. Tracks the request, the associated callback and time when
  * the request was first enqueued.
- *
- * @author Robert Varga
  */
-@Beta
 public class ConnectionEntry implements Immutable {
     private final Consumer<Response<?, ?>> callback;
     private final Request<?, ?> request;
@@ -32,7 +28,7 @@ public class ConnectionEntry implements Immutable {
     ConnectionEntry(final Request<?, ?> request, final Consumer<Response<?, ?>> callback, final long now) {
         this.request = requireNonNull(request);
         this.callback = requireNonNull(callback);
-        this.enqueuedTicks = now;
+        enqueuedTicks = now;
     }
 
     ConnectionEntry(final ConnectionEntry entry) {
index 08bc05346b65d1ce53faa7d3e64e8f38fc6a5359..0917174b654a62895b02f148a5e88b74074815c2 100644 (file)
@@ -7,25 +7,35 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.StampedLock;
 
 /**
  * A lock implementation which allows users to perform optimistic reads and validate them in a fashion similar
  * to {@link StampedLock}. In case a read is contented with a write, the read side will throw
  * an {@link InversibleLockException}, which the caller can catch and use to wait for the write to resolve.
- *
- * @author Robert Varga
  */
-@Beta
 public final class InversibleLock {
-    private static final AtomicReferenceFieldUpdater<InversibleLock, CountDownLatch> LATCH_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(InversibleLock.class, CountDownLatch.class, "latch");
+    private static final VarHandle LATCH;
+
+    static {
+        try {
+            LATCH = MethodHandles.lookup().findVarHandle(InversibleLock.class, "latch", CountDownLatch.class);
+        } catch (NoSuchFieldException | IllegalAccessException e) {
+            throw new ExceptionInInitializerError(e);
+        }
+    }
 
     private final StampedLock lock = new StampedLock();
+
+    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD",
+        justification = "https://github.com/spotbugs/spotbugs/issues/2749")
     private volatile CountDownLatch latch;
 
     /**
@@ -43,7 +53,7 @@ public final class InversibleLock {
 
             // Write-locked. Read the corresponding latch and if present report an exception, which will propagate
             // and force release of locks.
-            final CountDownLatch local = latch;
+            final var local = latch;
             if (local != null) {
                 throw new InversibleLockException(local);
             }
@@ -57,18 +67,13 @@ public final class InversibleLock {
     }
 
     public long writeLock() {
-        final CountDownLatch local = new CountDownLatch(1);
-        final boolean taken = LATCH_UPDATER.compareAndSet(this, null, local);
-        Verify.verify(taken);
-
+        verify(LATCH.compareAndSet(this, null, new CountDownLatch(1)));
         return lock.writeLock();
     }
 
     public void unlockWrite(final long stamp) {
-        final CountDownLatch local = LATCH_UPDATER.getAndSet(this, null);
-        Verify.verifyNotNull(local);
+        final var local = verifyNotNull((CountDownLatch) LATCH.getAndSet(this, null));
         lock.unlockWrite(stamp);
         local.countDown();
     }
-
 }
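
The InversibleLock rework above swaps AtomicReferenceFieldUpdater for a VarHandle obtained via MethodHandles.lookup(). A self-contained sketch of the same pattern on a hypothetical class (not part of this patch):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class VarHandleSketch {
        private static final VarHandle STATE;

        static {
            try {
                STATE = MethodHandles.lookup().findVarHandle(VarHandleSketch.class, "state", String.class);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private volatile String state;

        boolean start() {
            // Equivalent of AtomicReferenceFieldUpdater.compareAndSet(this, null, "running").
            return STATE.compareAndSet(this, null, "running");
        }

        String stop() {
            // Atomically clear the field and return the previous value.
            return (String) STATE.getAndSet(this, null);
        }
    }
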
index a2f2ffd7e87eb15b8bb9bbcf45460346eea7610a..82b6568459a34cdedaf24f7ddb91d32cba61b348 100644 (file)
@@ -9,17 +9,15 @@ package org.opendaylight.controller.cluster.access.client;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
+import java.io.Serial;
 import java.util.concurrent.CountDownLatch;
 
 /**
  * Exception thrown from {@link InversibleLock#optimisticRead()} and can be used to wait for the racing write
  * to complete using {@link #awaitResolution()}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class InversibleLockException extends RuntimeException {
+    @Serial
     private static final long serialVersionUID = 1L;
 
     private final transient CountDownLatch latch;
index fed9d4c5d3d01d5763b1b6420450c10d7f197fc5..677a57e770f6a3dd802d5ef22cddee268c58b0ec 100644 (file)
@@ -118,12 +118,13 @@ abstract class ProgressTracker {
      * @param now tick number corresponding to caller's present
      */
     ProgressTracker(final ProgressTracker oldTracker, final long now) {
-        this.defaultTicksPerTask = oldTracker.defaultTicksPerTask;
-        this.tasksEncountered = this.tasksClosed = oldTracker.tasksClosed;
-        this.lastClosed = oldTracker.lastClosed;
-        this.nearestAllowed = oldTracker.nearestAllowed;  // Call cancelDebt explicitly if needed.
-        this.lastIdle = oldTracker.lastIdle;
-        this.elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
+        defaultTicksPerTask = oldTracker.defaultTicksPerTask;
+        tasksEncountered = tasksClosed = oldTracker.tasksClosed;
+        lastClosed = oldTracker.lastClosed;
+        // Call cancelDebt explicitly if needed.
+        nearestAllowed = oldTracker.nearestAllowed;
+        lastIdle = oldTracker.lastIdle;
+        elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
         if (!oldTracker.isIdle()) {
             transitToIdle(now);
         }
@@ -154,7 +155,8 @@ abstract class ProgressTracker {
      *
      * @return number of tasks started but not finished yet
      */
-    final long tasksOpen() {  // TODO: Should we return int?
+    // TODO: Should we return int?
+    final long tasksOpen() {
         // TODO: Should we check the return value is non-negative?
         return tasksEncountered - tasksClosed;
     }
index f40deab30d5bed8bbf1a7eed8da3e76653ce27bf..b44d54921d9de38b898a06633f6c5dec25b2c36e 100644 (file)
@@ -63,8 +63,8 @@ final class RecoveringClientActorBehavior extends AbstractClientActorBehavior<In
             LOG.debug("{}: persisting new identifier {}", persistenceId(), nextId);
             context().saveSnapshot(nextId);
             return new SavingClientActorBehavior(context(), nextId);
-        } else if (recover instanceof SnapshotOffer) {
-            lastId = (ClientIdentifier) ((SnapshotOffer)recover).snapshot();
+        } else if (recover instanceof SnapshotOffer snapshotOffer) {
+            lastId = (ClientIdentifier) snapshotOffer.snapshot();
             LOG.debug("{}: recovered identifier {}", persistenceId(), lastId);
         } else {
             LOG.warn("{}: ignoring recovery message {}", persistenceId(), recover);
index 42823a19ab04400f70472e71e75b974e5f5a6840..feca185812c7652c4ef84bb74d204723e9864ff4 100644 (file)
@@ -29,26 +29,24 @@ final class SavingClientActorBehavior extends RecoveredClientActorBehavior<Initi
 
     SavingClientActorBehavior(final InitialClientActorContext context, final ClientIdentifier nextId) {
         super(context);
-        this.myId = requireNonNull(nextId);
+        myId = requireNonNull(nextId);
     }
 
     @Override
     AbstractClientActorBehavior<?> onReceiveCommand(final Object command) {
-        if (command instanceof SaveSnapshotFailure) {
-            LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) command).cause());
+        if (command instanceof SaveSnapshotFailure saveFailure) {
+            LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
             return null;
-        } else if (command instanceof SaveSnapshotSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), command);
-            SaveSnapshotSuccess saved = (SaveSnapshotSuccess)command;
+        } else if (command instanceof SaveSnapshotSuccess saved) {
+            LOG.debug("{}: got command: {}", persistenceId(), saved);
             context().deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
                     saved.metadata().timestamp() - 1, 0L, 0L));
             return this;
-        } else if (command instanceof DeleteSnapshotsSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), command);
-        } else if (command instanceof DeleteSnapshotsFailure) {
+        } else if (command instanceof DeleteSnapshotsSuccess deleteSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+        } else if (command instanceof DeleteSnapshotsFailure deleteFailure) {
             // Not treating this as a fatal error.
-            LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
-                    ((DeleteSnapshotsFailure) command).cause());
+            LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
         } else {
             LOG.debug("{}: stashing command {}", persistenceId(), command);
             context().stash();
index 71de580bd3f291d200170078db9ab939187d6da9..cc3da1e4503118ed5a064ceeb60fd34c4fe9e12d 100644 (file)
@@ -53,10 +53,8 @@ import org.slf4j.LoggerFactory;
  *
  * <p>
  * This class is not thread-safe, as it is expected to be guarded by {@link AbstractClientConnection}.
- *
- * @author Robert Varga
  */
-abstract class TransmitQueue {
+abstract sealed class TransmitQueue {
     static final class Halted extends TransmitQueue {
         // For ConnectingClientConnection.
         Halted(final int targetDepth) {
@@ -148,7 +146,8 @@ abstract class TransmitQueue {
 
     private final Deque<TransmittedConnectionEntry> inflight = new ArrayDeque<>();
     private final Deque<ConnectionEntry> pending = new ArrayDeque<>();
-    private final AveragingProgressTracker tracker;  // Cannot be just ProgressTracker as we are inheriting limits.
+    // Cannot be just ProgressTracker as we are inheriting limits.
+    private final AveragingProgressTracker tracker;
     private ReconnectForwarder successor;
 
     /**
@@ -218,7 +217,7 @@ abstract class TransmitQueue {
             return Optional.empty();
         }
 
-        final TransmittedConnectionEntry entry = maybeEntry.get();
+        final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
         tracker.closeTask(now, entry.getEnqueuedTicks(), entry.getTxTicks(), envelope.getExecutionTimeNanos());
 
         // We have freed up a slot, try to transmit something
@@ -256,7 +255,7 @@ abstract class TransmitQueue {
             return false;
         }
 
-        inflight.addLast(maybeTransmitted.get());
+        inflight.addLast(maybeTransmitted.orElseThrow());
         return true;
     }
 
@@ -425,12 +424,10 @@ abstract class TransmitQueue {
             }
 
             // Check if the entry has (ever) been transmitted
-            if (!(e instanceof TransmittedConnectionEntry)) {
+            if (!(e instanceof TransmittedConnectionEntry te)) {
                 return Optional.empty();
             }
 
-            final TransmittedConnectionEntry te = (TransmittedConnectionEntry) e;
-
             // Now check session match
             if (envelope.getSessionId() != te.getSessionId()) {
                 LOG.debug("Expecting session {}, ignoring response {}", te.getSessionId(), envelope);
index 0b630e2da8fd7b0769bf2c7e88da51184ac87bbd..90ffd77a347e7042c8a099169f4bc19b0acc3b03 100644 (file)
@@ -7,11 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
@@ -22,34 +22,50 @@ import akka.persistence.SnapshotMetadata;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import com.typesafe.config.ConfigFactory;
-import java.lang.reflect.Field;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Answers;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import scala.concurrent.duration.FiniteDuration;
 
-public class ActorBehaviorTest {
-
+@ExtendWith(MockitoExtension.class)
+class ActorBehaviorTest {
     private static final String MEMBER_1_FRONTEND_TYPE_1 = "member-1-frontend-type-1";
     private static final FiniteDuration TIMEOUT = FiniteDuration.create(5, TimeUnit.SECONDS);
 
+    @Mock
+    private InternalCommand<BackendInfo> cmd;
+    @Mock(answer = Answers.CALLS_REAL_METHODS)
+    private ClientActorBehavior<BackendInfo> initialBehavior;
+    @Mock
+    private AbstractClientActorContext ctx;
+
     private ActorSystem system;
     private TestProbe probe;
-    private ClientActorBehavior<BackendInfo> initialBehavior;
     private MockedSnapshotStore.SaveRequest saveRequest;
     private FrontendIdentifier id;
     private ActorRef mockedActor;
 
-    @Before
-    public void setUp() throws Exception {
-        initialBehavior = createInitialBehaviorMock();
+    @BeforeEach
+    void beforeEach() throws Exception {
+        //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
+        //use reflection to work around this
+        final var context = AbstractClientActorBehavior.class.getDeclaredField("context");
+        context.setAccessible(true);
+        context.set(initialBehavior, ctx);
+        final var persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
+        persistenceId.setAccessible(true);
+        persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
+
         system = ActorSystem.apply("system1");
         final ActorRef storeRef = system.registerExtension(Persistence.lookup()).snapshotStoreFor(null,
             ConfigFactory.empty());
@@ -62,25 +78,23 @@ public class ActorBehaviorTest {
         saveRequest = handleRecovery(null);
     }
 
-    @After
-    public void tearDown() {
+    @AfterEach
+    void afterEach() {
         TestKit.shutdownActorSystem(system);
     }
 
     @Test
-    public void testInitialBehavior() {
-        final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
-        when(cmd.execute(any())).thenReturn(initialBehavior);
+    void testInitialBehavior() {
+        doReturn(initialBehavior).when(cmd).execute(any());
         mockedActor.tell(cmd, ActorRef.noSender());
         verify(cmd, timeout(1000)).execute(initialBehavior);
     }
 
     @Test
-    public void testCommandStashing() {
+    void testCommandStashing() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
-        final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
-        when(cmd.execute(any())).thenReturn(initialBehavior);
+        doReturn(initialBehavior).when(cmd).execute(any());
         //send messages before recovery is completed
         mockedActor.tell(cmd, ActorRef.noSender());
         mockedActor.tell(cmd, ActorRef.noSender());
@@ -91,16 +105,16 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestart() {
+    void testRecoveryAfterRestart() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         final MockedSnapshotStore.SaveRequest newSaveRequest =
                 handleRecovery(new SelectedSnapshot(saveRequest.getMetadata(), saveRequest.getSnapshot()));
-        Assert.assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
+        assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
     }
 
     @Test
-    public void testRecoveryAfterRestartFrontendIdMismatch() {
+    void testRecoveryAfterRestartFrontendIdMismatch() {
         system.stop(mockedActor);
         //start actor again
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
@@ -117,7 +131,7 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestartSaveSnapshotFail() {
+    void testRecoveryAfterRestartSaveSnapshotFail() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         probe.watch(mockedActor);
@@ -130,7 +144,7 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestartDeleteSnapshotsFail() {
+    void testRecoveryAfterRestartDeleteSnapshotsFail() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         probe.watch(mockedActor);
@@ -144,21 +158,6 @@ public class ActorBehaviorTest {
         probe.expectNoMessage();
     }
 
-    @SuppressWarnings("unchecked")
-    private static ClientActorBehavior<BackendInfo> createInitialBehaviorMock() throws Exception {
-        final ClientActorBehavior<BackendInfo> initialBehavior = mock(ClientActorBehavior.class);
-        //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
-        //use reflection to work around this
-        final Field context = AbstractClientActorBehavior.class.getDeclaredField("context");
-        context.setAccessible(true);
-        final AbstractClientActorContext ctx = mock(AbstractClientActorContext.class);
-        context.set(initialBehavior, ctx);
-        final Field persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
-        persistenceId.setAccessible(true);
-        persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
-        return initialBehavior;
-    }
-
     private MockedSnapshotStore.SaveRequest handleRecovery(final SelectedSnapshot savedState) {
         probe.expectMsgClass(MockedSnapshotStore.LoadRequest.class);
         //offer snapshot
@@ -173,7 +172,6 @@ public class ActorBehaviorTest {
     }
 
     private static class MockedActor extends AbstractClientActor {
-
         private final ClientActorBehavior<?> initialBehavior;
         private final ClientActorConfig mockConfig = AccessClientUtil.newMockClientActorConfig();
 
@@ -196,5 +194,4 @@ public class ActorBehaviorTest {
             return mockConfig;
         }
     }
-
 }
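
The test above migrates from JUnit 4 with manual mock()/when() calls to JUnit 5 with MockitoExtension, @Mock fields and doReturn() stubbing. A self-contained sketch of that setup pattern, using a hypothetical Service/Handler pair rather than the actor classes:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doReturn;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.ExtendWith;
    import org.mockito.Mock;
    import org.mockito.junit.jupiter.MockitoExtension;

    @ExtendWith(MockitoExtension.class)
    class HandlerTest {
        interface Service {
            String execute(Object input);
        }

        record Handler(Service service) {
            String run(final Object input) {
                return service.execute(input);
            }
        }

        @Mock
        private Service service;

        @Test
        void returnsServiceResult() {
            // doReturn().when() stubs without invoking the mocked method, which
            // matters for mocks configured with Answers.CALLS_REAL_METHODS
            doReturn("ok").when(service).execute(any());
            assertEquals("ok", new Handler(service).run("input"));
        }
    }
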
index 1af656b341184600defa19daaf62eb4ee9ffd3d9..819de8b8b4a43bc4927bea3454688a4fbeded39f 100644 (file)
@@ -27,7 +27,8 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 import org.opendaylight.controller.cluster.messaging.MessageSlice;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ConnectedClientConnectionTest
         extends AbstractClientConnectionTest<ConnectedClientConnection<BackendInfo>, BackendInfo> {
@@ -42,7 +43,7 @@ public class ConnectedClientConnectionTest
 
     @Override
     protected ConnectedClientConnection<BackendInfo> createConnection() {
-        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
         final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
                 backend.getName());
         return new ConnectedClientConnection<>(connectingConn, backend);
@@ -70,9 +71,10 @@ public class ConnectedClientConnectionTest
                 new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_ID, 0L), 0L);
         ModifyTransactionRequestBuilder reqBuilder =
                 new ModifyTransactionRequestBuilder(identifier, replyToProbe.ref());
-        reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.empty(), Builders.containerBuilder()
-                .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(
-                        QName.create("namespace", "localName"))).build()));
+        reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.of(),
+            ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+                .build()));
         reqBuilder.setSequence(0L);
         final Request<?, ?> request = reqBuilder.build();
         connection.sendRequest(request, callback);
index d566e0ec3b4f515903fa8894862bae9bb978a33e..f77500328d06a01ae1482c1a0fb39ebd1085e2d9 100644 (file)
@@ -38,8 +38,6 @@ import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
@@ -64,8 +62,7 @@ public class ConnectingClientConnectionTest {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
-                final ABIVersion version) {
+        protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -88,7 +85,7 @@ public class ConnectingClientConnectionTest {
         }
 
         @Override
-        protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+        protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
index 2d1afb81adea475069edba3193e873f2fe9e3ecf..b3bfdec66e9b0c184daeb7e4127495e59c66006d 100644 (file)
@@ -26,8 +26,6 @@ import org.junit.runner.RunWith;
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
@@ -47,8 +45,7 @@ public class ConnectionEntryTest {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
-                final ABIVersion version) {
+        protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -71,7 +68,7 @@ public class ConnectionEntryTest {
         }
 
         @Override
-        protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+        protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
index 84cfea481b6b27e906ac4de03e6b9c3773f273f0..367acb3b6d74f068c0caf3d26ce2db583432d9d8 100644 (file)
@@ -44,7 +44,7 @@ public class ReconnectingClientConnectionTest
 
     @Override
     protected ReconnectingClientConnection<BackendInfo> createConnection() {
-        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
         final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
                 backend.getName());
         final ConnectedClientConnection<BackendInfo> connectedConn =
index b40aefb663ae11b80472cba459f0dbeb6d3a4f97..9974e1b1cd88b6e60cffac28f7bbbc5aa65a98d0 100644 (file)
@@ -64,7 +64,7 @@ public class TransmittingTransmitQueueTest extends AbstractTransmitQueueTest<Tra
     @Override
     protected TransmitQueue.Transmitting createQueue() {
         doReturn(false).when(mockMessageSlicer).slice(any());
-        backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.BORON, 3);
+        backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.current(), 3);
         return new TransmitQueue.Transmitting(new TransmitQueue.Halted(0), 0, backendInfo, now(), mockMessageSlicer);
     }
 
@@ -146,8 +146,8 @@ public class TransmittingTransmitQueueTest extends AbstractTransmitQueueTest<Tra
 
         Optional<TransmittedConnectionEntry> transmitted = queue.transmit(entry, now);
         assertTrue(transmitted.isPresent());
-        assertEquals(request, transmitted.get().getRequest());
-        assertEquals(callback, transmitted.get().getCallback());
+        assertEquals(request, transmitted.orElseThrow().getRequest());
+        assertEquals(callback, transmitted.orElseThrow().getCallback());
 
         final RequestEnvelope requestEnvelope = probe.expectMsgClass(RequestEnvelope.class);
         assertEquals(request, requestEnvelope.getMessage());
index 28ae088e5a2275f68a448c6a31e6ebc50e2f1529..a28781c07d2c2b8e703ed6de09672338bf718566 100644 (file)
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
 
     <dependencies>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>repackaged-akka</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
-
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-dom-api</artifactId>
-        </dependency>
     </dependencies>
 
     <build>
index 696fec275316b7f231babae454f0b5df80c85a23..cd20186349fce062f4a4bd65d00398104274ae87 100644 (file)
@@ -7,14 +7,9 @@
  */
 package org.opendaylight.controller.cluster.dom.api;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Enumeration of possible shard leader locations relative to the local node.
- *
- * @author Robert Varga
  */
-@Beta
 public enum LeaderLocation {
     /**
      * The leader is co-located on this node.
index 4f5fd4e83e1bac0da0021b98eb81578dfacab7b4..69e34ca44cdb64f10e41ea1e1a7d52facfbdebaf 100644 (file)
@@ -7,17 +7,12 @@
  */
 package org.opendaylight.controller.cluster.dom.api;
 
-import com.google.common.annotations.Beta;
-import java.util.EventListener;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Listener for shard leader location changes.
- *
- * @author Robert Varga
  */
-@Beta
-public interface LeaderLocationListener extends EventListener {
+public interface LeaderLocationListener {
     /**
      * Invoked when shard leader location changes.
      *
diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java
deleted file mode 100644 (file)
index 61f6426..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * Registration of a {@link LeaderLocationListener}.
- *
- * @author Robert Varga
- *
- * @param <T> Listener type
- */
-@Beta
-public interface LeaderLocationListenerRegistration<T extends LeaderLocationListener> extends ListenerRegistration<T> {
-
-}
index c60b2e20138de71eed648076a4314d8cd91c0eb1..2ea3c28f04caf53045466b000001c2c0b7f2f2bd 100644 (file)
@@ -4,13 +4,20 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
     <artifactId>cds-mgmt-api</artifactId>
     <packaging>bundle</packaging>
 
+    <dependencies>
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
+        </dependency>
+    </dependencies>
+
     <build>
         <plugins>
             <plugin>
index 0eeed2cee96c4a9a25378d594d5a766c0e85689f..dce797ca4fba90d6cd4c2e7259c5540689c04834 100644 (file)
@@ -14,7 +14,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
 
     <dependencies>
         <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
         </dependency>
         <dependency>
-            <groupId>com.typesafe</groupId>
-            <artifactId>config</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
@@ -80,8 +81,8 @@
             <optional>true</optional>
         </dependency>
         <dependency>
-            <groupId>javax.annotation</groupId>
-            <artifactId>javax.annotation-api</artifactId>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
             <scope>provided</scope>
             <optional>true</optional>
         </dependency>
             <artifactId>scala-library</artifactId>
         </dependency>
 
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
+        </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <artifactId>akka-actor-testkit-typed_2.13</artifactId>
             <artifactId>awaitility</artifactId>
         </dependency>
 
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-binding-dom-codec</artifactId>
             <artifactId>mdsal-binding-generator</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-runtime-api</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-binding-runtime-spi</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-singleton-common-api</artifactId>
+            <artifactId>mdsal-singleton-api</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-singleton-dom-impl</artifactId>
+            <artifactId>mdsal-singleton-impl</artifactId>
             <scope>test</scope>
         </dependency>
 
index 9520b58d59f852b640d5d2a66a64799b2b493201..332fb44af7575423869362d3132a064e4fd226a2 100644 (file)
@@ -17,6 +17,7 @@ import akka.cluster.typed.Cluster;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.time.Duration;
 import java.util.Optional;
 import java.util.Set;
@@ -57,17 +58,17 @@ import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCod
 import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
 import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntities;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntity;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwner;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.OdlEntityOwnersService;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.RpcOutput;
 import org.opendaylight.yangtools.yang.common.Empty;
@@ -87,8 +88,7 @@ import org.slf4j.LoggerFactory;
  */
 @Singleton
 @Component(immediate = true, service = { DOMEntityOwnershipService.class, DataCenterControl.class })
-public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable,
-        OdlEntityOwnersService {
+public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(AkkaEntityOwnershipService.class);
     private static final String DATACENTER_PREFIX = "dc";
     private static final Duration DATACENTER_OP_TIMEOUT = Duration.ofSeconds(20);
@@ -140,12 +140,17 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da
 
     @Inject
     @Activate
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "Non-final for testing; 'this' reference is expected to be stable at registration time")
     public AkkaEntityOwnershipService(@Reference final ActorSystemProvider actorProvider,
             @Reference final RpcProviderService rpcProvider, @Reference final BindingCodecTree codecTree)
             throws ExecutionException, InterruptedException {
         this(actorProvider.getActorSystem(), codecTree);
 
-        reg = rpcProvider.registerRpcImplementation(OdlEntityOwnersService.class, this);
+        reg = rpcProvider.registerRpcImplementations(
+            (GetEntity) this::getEntity,
+            (GetEntities) this::getEntities,
+            (GetEntityOwner) this::getEntityOwner);
     }
 
     @PreDestroy
@@ -160,7 +165,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da
     }
 
     @Override
-    public DOMEntityOwnershipCandidateRegistration registerCandidate(final DOMEntity entity)
+    public Registration registerCandidate(final DOMEntity entity)
             throws CandidateAlreadyRegisteredException {
         if (!registeredEntities.add(entity)) {
             throw new CandidateAlreadyRegisteredException(entity);
@@ -174,8 +179,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da
     }
 
     @Override
-    public DOMEntityOwnershipListenerRegistration registerListener(final String entityType,
-                                                                   final DOMEntityOwnershipListener listener) {
+    public Registration registerListener(final String entityType, final DOMEntityOwnershipListener listener) {
         LOG.debug("Registering listener {} for type {}", listener, entityType);
         listenerRegistry.tell(new RegisterListener(entityType, listener));
 
@@ -221,21 +225,21 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da
             AskPattern.ask(ownerSupervisor, DeactivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler));
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetEntitiesOutput>> getEntities(final GetEntitiesInput input) {
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntitiesOutput>> getEntities(final GetEntitiesInput input) {
         return toRpcFuture(AskPattern.ask(ownerStateChecker, GetEntitiesRequest::new, QUERY_TIMEOUT, scheduler),
                 reply -> reply.toOutput(iidCodec));
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetEntityOutput>> getEntity(final GetEntityInput input) {
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntityOutput>> getEntity(final GetEntityInput input) {
         return toRpcFuture(AskPattern.ask(ownerStateChecker,
             (final ActorRef<GetEntityReply> replyTo) -> new GetEntityRequest(replyTo, input), QUERY_TIMEOUT, scheduler),
             GetEntityReply::toOutput);
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetEntityOwnerOutput>> getEntityOwner(final GetEntityOwnerInput input) {
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntityOwnerOutput>> getEntityOwner(final GetEntityOwnerInput input) {
         return toRpcFuture(AskPattern.ask(ownerStateChecker,
             (final ActorRef<GetEntityOwnerReply> replyTo) -> new GetEntityOwnerRequest(replyTo, input), QUERY_TIMEOUT,
             scheduler), GetEntityOwnerReply::toOutput);
@@ -282,7 +286,7 @@ public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, Da
                 future.setException(failure);
             } else {
                 LOG.debug("{} DataCenter successful", op);
-                future.set(Empty.getInstance());
+                future.set(Empty.value());
             }
         });
         return future;
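
The service above no longer implements the removed OdlEntityOwnersService interface; instead it registers each RPC separately by casting method references to the generated single-abstract-method RPC interfaces (GetEntity, GetEntities, GetEntityOwner). The casting pattern in isolation, with hypothetical interfaces standing in for the YANG-generated ones:

    import java.util.List;

    final class RpcRegistrationSketch {
        // hypothetical stand-ins for generated single-abstract-method RPC interfaces
        interface GetThing { String invoke(String input); }
        interface ListThings { List<String> invoke(); }

        private String getThing(final String input) {
            return "thing-" + input;
        }

        private List<String> listThings() {
            return List.of("a", "b");
        }

        // one class can provide several RPCs without implementing a combined
        // service interface: each method reference is cast to its RPC interface
        List<Object> implementations() {
            return List.of((GetThing) this::getThing, (ListThings) this::listThings);
        }
    }
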
index 56a2f099f67cfa9700cf6bce26e99afcc2b7d9fa..fd80ee1c5683cb16d2b5aff95bd70a09ad5692a1 100644 (file)
@@ -10,11 +10,9 @@ package org.opendaylight.controller.eos.akka;
 import static java.util.Objects.requireNonNull;
 
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
 import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 
-final class CandidateRegistration extends AbstractObjectRegistration<DOMEntity>
-        implements DOMEntityOwnershipCandidateRegistration {
+final class CandidateRegistration extends AbstractObjectRegistration<DOMEntity> {
     private final AkkaEntityOwnershipService service;
 
     CandidateRegistration(final DOMEntity instance, final AkkaEntityOwnershipService service) {
index 568fbd7b68b61249200e0a65e779a514cc3d84ec..f70abc9a860690846fb01f4cbdaf9ff50c612e68 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.eos.akka;
 
-import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.common.Empty;
@@ -23,7 +22,6 @@ import org.opendaylight.yangtools.yang.common.Empty;
  * on any node from the datacenter to be activated. Datacenters only need to be brought up when using a non-default
  * datacenter or multiple datacenters.
  */
-@Beta
 public interface DataCenterControl {
     /**
      * Activates the Entity Ownership Service in the datacenter that this method is called.
index 435babe8ec067f9a533afb5de6c0029086499ba8..8d101c24c69a5f8e0851955eba4fa54672ca4b7d 100644 (file)
@@ -12,11 +12,9 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.base.MoreObjects;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
 import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 
-final class ListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener>
-        implements DOMEntityOwnershipListenerRegistration {
+final class ListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener> {
     private final AkkaEntityOwnershipService service;
     private final @NonNull String entityType;
 
@@ -27,8 +25,7 @@ final class ListenerRegistration extends AbstractObjectRegistration<DOMEntityOwn
         this.service = requireNonNull(service);
     }
 
-    @Override
-    public  String getEntityType() {
+    public String entityType() {
         return entityType;
     }
 
index be1415ed5bf7a1f2e1f0d6b603ba07f4a1ca1447..dab699e394d5fc65bca8ab681d1cefdc9b045136 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.eos.akka.bootstrap;
 
 import akka.actor.typed.ActorRef;
 import akka.actor.typed.Behavior;
+import akka.actor.typed.SupervisorStrategy;
 import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
@@ -43,12 +44,13 @@ public final class EOSMain extends AbstractBehavior<BootstrapCommand> {
         final String role = Cluster.get(context.getSystem()).selfMember().getRoles().iterator().next();
 
         listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry");
-        candidateRegistry = context.spawn(CandidateRegistryInit.create(), "CandidateRegistry");
 
         final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem());
         // start the initial sync behavior that switches to the regular one after syncing
         ownerSupervisor = clusterSingleton.init(
-                SingletonActor.of(IdleSupervisor.create(iidCodec), "OwnerSupervisor"));
+                SingletonActor.of(Behaviors.supervise(IdleSupervisor.create(iidCodec))
+                        .onFailure(SupervisorStrategy.restart()), "OwnerSupervisor"));
+        candidateRegistry = context.spawn(CandidateRegistryInit.create(ownerSupervisor), "CandidateRegistry");
 
         ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, iidCodec),
                 "OwnerStateChecker");
@@ -73,7 +75,7 @@ public final class EOSMain extends AbstractBehavior<BootstrapCommand> {
     }
 
     private Behavior<BootstrapCommand> onTerminate(final Terminate request) {
-        request.getReplyTo().tell(Empty.getInstance());
+        request.getReplyTo().tell(Empty.value());
         return Behaviors.stopped();
     }
 }
\ No newline at end of file
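
EOSMain now wraps the singleton supervisor behavior in a restart strategy before handing it to ClusterSingleton, so a failure inside the supervisor restarts it instead of terminating the singleton, and the candidate registry is started only once the singleton proxy exists. A minimal sketch of the supervision wrapping (the generic behavior and singleton name are placeholders):

    import akka.actor.typed.ActorRef;
    import akka.actor.typed.ActorSystem;
    import akka.actor.typed.Behavior;
    import akka.actor.typed.SupervisorStrategy;
    import akka.actor.typed.javadsl.Behaviors;
    import akka.cluster.typed.ClusterSingleton;
    import akka.cluster.typed.SingletonActor;

    final class SupervisedSingletonSketch {
        static <T> ActorRef<T> spawnSingleton(final ActorSystem<?> system, final Behavior<T> behavior) {
            // restart the wrapped behavior on failure rather than stopping the singleton
            final Behavior<T> supervised = Behaviors.supervise(behavior)
                    .onFailure(SupervisorStrategy.restart());
            return ClusterSingleton.get(system).init(SingletonActor.of(supervised, "ExampleSingleton"));
        }
    }
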
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java
new file mode 100644 (file)
index 0000000..a4366b5
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+
+abstract class AbstractSupervisor extends AbstractBehavior<OwnerSupervisorCommand> {
+
+    final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "getContext() is non-final")
+    AbstractSupervisor(final ActorContext<OwnerSupervisorCommand> context) {
+        super(context);
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+        candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+    }
+
+    Behavior<OwnerSupervisorCommand> onClearCandidatesForMember(final ClearCandidatesForMember command) {
+        getLogger().debug("Clearing candidates for member: {}", command.getCandidate());
+
+        candidateReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY,
+                        new Replicator.ReadMajority(Duration.ofSeconds(15)), askReplyTo),
+                response -> new ClearCandidates(response, command));
+
+        return this;
+    }
+
+    Behavior<OwnerSupervisorCommand> finishClearCandidates(final ClearCandidates command) {
+        if (command.getResponse() instanceof Replicator.GetSuccess) {
+            getLogger().debug("Retrieved candidate data, clearing candidates for {}",
+                    command.getOriginalMessage().getCandidate());
+
+            getContext().spawnAnonymous(CandidateCleaner.create()).tell(command);
+        } else {
+            getLogger().debug("Unable to retrieve candidate data for {}, no candidates present sending empty reply",
+                    command.getOriginalMessage().getCandidate());
+            command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+        }
+
+        return this;
+    }
+
+    abstract Logger getLogger();
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java
new file mode 100644 (file)
index 0000000..8ce9adb
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import java.util.Map;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesUpdateResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Actor that can be spawned by any of the supervisor implementations to clear candidates once candidate retrieval
+ * succeeds. Once the candidates for the member are cleared (or immediately, if none need to be cleared), the actor
+ * stops itself.
+ */
+public final class CandidateCleaner extends AbstractBehavior<OwnerSupervisorCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(CandidateCleaner.class);
+
+    private final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+    private final SelfUniqueAddress node;
+
+    private int remaining = 0;
+
+    private CandidateCleaner(final ActorContext<OwnerSupervisorCommand> context) {
+        super(context);
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+        candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+        node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+
+    }
+
+    public static Behavior<OwnerSupervisorCommand> create() {
+        return Behaviors.setup(CandidateCleaner::new);
+    }
+
+    @Override
+    public Receive<OwnerSupervisorCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(ClearCandidates.class, this::onClearCandidates)
+                .onMessage(ClearCandidatesUpdateResponse.class, this::onClearCandidatesUpdateResponse)
+                .build();
+    }
+
+    private Behavior<OwnerSupervisorCommand> onClearCandidates(final ClearCandidates command) {
+        LOG.debug("Clearing candidates for member: {}", command.getOriginalMessage().getCandidate());
+
+        final ORMap<DOMEntity, ORSet<String>> candidates =
+                ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) command.getResponse())
+                        .get(CandidateRegistry.KEY);
+
+        for (final Map.Entry<DOMEntity, ORSet<String>> entry : candidates.getEntries().entrySet()) {
+            if (entry.getValue().contains(command.getOriginalMessage().getCandidate())) {
+                LOG.debug("Removing {} from {}", command.getOriginalMessage().getCandidate(), entry.getKey());
+
+                remaining++;
+                candidateReplicator.askUpdate(
+                        askReplyTo -> new Replicator.Update<>(
+                                CandidateRegistry.KEY,
+                                ORMap.empty(),
+                                new Replicator.WriteMajority(Duration.ofSeconds(10)),
+                                askReplyTo,
+                                map -> map.update(node, entry.getKey(), ORSet.empty(),
+                                        value -> value.remove(node, command.getOriginalMessage().getCandidate()))),
+                        updateResponse -> new ClearCandidatesUpdateResponse(updateResponse,
+                                command.getOriginalMessage().getReplyTo()));
+            }
+        }
+
+        if (remaining == 0) {
+            LOG.debug("Did not clear any candidates for {}", command.getOriginalMessage().getCandidate());
+            command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+            return Behaviors.stopped();
+        }
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onClearCandidatesUpdateResponse(
+            final ClearCandidatesUpdateResponse command) {
+        remaining--;
+        if (remaining == 0) {
+            LOG.debug("Last update response for candidate removal received, replying to: {}", command.getReplyTo());
+            command.getReplyTo().tell(new ClearCandidatesResponse());
+            return Behaviors.stopped();
+        } else {
+            LOG.debug("Have still {} outstanding requests after {}", remaining, command.getResponse());
+        }
+        return this;
+    }
+}
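
CandidateCleaner tracks outstanding replicator updates with a plain counter: each askUpdate() increments 'remaining', each ClearCandidatesUpdateResponse decrements it, and the actor stops once the counter returns to zero (or immediately when nothing had to be removed). The countdown itself, reduced to a sketch with no Akka types:

    final class CountdownSketch {
        private int remaining;

        // call once for every request that was sent out
        void requestSent() {
            remaining++;
        }

        // call for every response; returns true when the last one has arrived
        boolean responseReceived() {
            remaining--;
            return remaining == 0;
        }
    }
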
index 2baeb62fc354028c541673d4a1ba569a752ea81b..3028552a1031c7a790e63d62a8d495609aff135e 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.eos.akka.owner.supervisor;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.typed.Behavior;
-import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.actor.typed.javadsl.Receive;
@@ -18,6 +17,8 @@ import akka.cluster.Member;
 import akka.cluster.typed.Cluster;
 import akka.pattern.StatusReply;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
@@ -32,7 +33,7 @@ import org.slf4j.LoggerFactory;
  * in the primary datacenter, or is activated on demand. Once the supervisor instance is no longer needed in the
  * secondary datacenter, it needs to be deactivated manually.
  */
-public final class IdleSupervisor extends AbstractBehavior<OwnerSupervisorCommand> {
+public final class IdleSupervisor extends AbstractSupervisor {
     private static final Logger LOG = LoggerFactory.getLogger(IdleSupervisor.class);
 
     private static final String DATACENTER_PREFIX = "dc-";
@@ -56,7 +57,6 @@ public final class IdleSupervisor extends AbstractBehavior<OwnerSupervisorComman
     }
 
     public static Behavior<OwnerSupervisorCommand> create(final BindingInstanceIdentifierCodec iidCodec) {
-
         return Behaviors.setup(context -> new IdleSupervisor(context, iidCodec));
     }
 
@@ -67,6 +67,8 @@ public final class IdleSupervisor extends AbstractBehavior<OwnerSupervisorComman
                 .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
                 .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
                 .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
                 .build();
     }
 
@@ -82,10 +84,15 @@ public final class IdleSupervisor extends AbstractBehavior<OwnerSupervisorComman
         return OwnerSyncer.create(message.getReplyTo(), iidCodec);
     }
 
-    private String extractDatacenterRole(final Member selfMember) {
+    private static String extractDatacenterRole(final Member selfMember) {
         return selfMember.getRoles().stream()
                 .filter(role -> role.startsWith(DATACENTER_PREFIX))
                 .findFirst()
                 .orElseThrow(() -> new IllegalArgumentException(selfMember + " does not have a valid role"));
     }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
 }
index 9841b65b7bd7e53fff42ccbef4987369822b3b59..1e2a41beca3625f8521602f5129f005fdb2572d7 100644 (file)
@@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.typed.ActorRef;
 import akka.actor.typed.Behavior;
-import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.actor.typed.javadsl.Receive;
@@ -47,6 +46,8 @@ import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.AbstractEntityRequest;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.CandidatesChanged;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterDeactivated;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
@@ -73,7 +74,7 @@ import scala.collection.JavaConverters;
  * registry in distributed-data and picks entity owners based on the current cluster state and registered candidates.
  * On cluster up/down etc. events the owners are reassigned if possible.
  */
-public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorCommand> {
+public final class OwnerSupervisor extends AbstractSupervisor {
 
     private static final Logger LOG = LoggerFactory.getLogger(OwnerSupervisor.class);
     private static final String DATACENTER_PREFIX = "dc-";
@@ -83,7 +84,7 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
     // Our own clock implementation so we do not have to rely on synchronized clocks. This basically functions as an
     // increasing counter which is fine for our needs as we only ever have a single writer since the supervisor is
     // running in a cluster-singleton
-    private final LWWRegister.Clock<String> clock = (currentTimestamp, value) -> currentTimestamp + 1;
+    private static final LWWRegister.Clock<String> CLOCK = (currentTimestamp, value) -> currentTimestamp + 1;
 
     private final Cluster cluster;
     private final SelfUniqueAddress node;
@@ -152,8 +153,7 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
                 });
         cluster.subscriptions().tell(Subscribe.create(reachabilityEventAdapter, ClusterEvent.ReachabilityEvent.class));
 
-        new ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>>(context, replicator,
-            Duration.ofSeconds(5)).subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
+        candidateReplicator.subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
 
         LOG.debug("Owner Supervisor started");
     }
@@ -176,6 +176,8 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
                 .onMessage(GetEntitiesBackendRequest.class, this::onGetEntities)
                 .onMessage(GetEntityBackendRequest.class, this::onGetEntity)
                 .onMessage(GetEntityOwnerBackendRequest.class, this::onGetEntityOwner)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
                 .build();
     }
 
@@ -193,7 +195,7 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
     private void reassignUnreachableOwners() {
         final Set<String> ownersToReassign = new HashSet<>();
         for (final String owner : ownerToEntity.keys()) {
-            if (!activeMembers.contains(owner)) {
+            if (!isActiveCandidate(owner)) {
                 ownersToReassign.add(owner);
             }
         }
@@ -259,8 +261,10 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
                 LOG.debug("Adding new candidate for entity: {} : {}", entity, toCheck);
                 currentCandidates.get(entity).add(toCheck);
 
-                if (!currentOwners.containsKey(entity)) {
-                    // might as well assign right away when we don't have an owner
+                final String currentOwner = currentOwners.get(entity);
+
+                if (currentOwner == null || !activeMembers.contains(currentOwner)) {
+                    // might as well assign right away when we don't have an owner or it is unreachable
                     assignOwnerFor(entity);
                 }
 
@@ -296,6 +300,13 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
         LOG.debug("Reassigning owners for {}", entities);
         for (final DOMEntity entity : entities) {
             if (predicate.test(entity, oldOwner)) {
+
+                if (!isActiveCandidate(oldOwner) && isCandidateFor(entity, oldOwner) && hasSingleCandidate(entity)) {
+                    // skip new owner assignment only when the old owner is unreachable, is still a candidate and is
+                    // the ONLY candidate
+                    LOG.debug("{} is the only candidate for {}. Skipping reassignment.", oldOwner, entity);
+                    continue;
+                }
                 ownerToEntity.remove(oldOwner, entity);
                 assignOwnerFor(entity);
             }
@@ -310,6 +321,10 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
         return currentCandidates.getOrDefault(entity, Set.of()).contains(candidate);
     }
 
+    private boolean hasSingleCandidate(final DOMEntity entity) {
+        return currentCandidates.getOrDefault(entity, Set.of()).size() == 1;
+    }
+
     private void assignOwnerFor(final DOMEntity entity) {
         final Set<String> candidatesForEntity = currentCandidates.get(entity);
         if (candidatesForEntity.isEmpty()) {
@@ -356,7 +371,7 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
                         new LWWRegister<>(node.uniqueAddress(), candidate, 0),
                         Replicator.writeLocal(),
                         askReplyTo,
-                        register -> register.withValue(node, candidate, clock)),
+                        register -> register.withValue(node, candidate, CLOCK)),
                 OwnerChanged::new);
     }
 
@@ -465,4 +480,9 @@ public final class OwnerSupervisor extends AbstractBehavior<OwnerSupervisorComma
         return member.getRoles().stream().filter(role -> role.startsWith(DATACENTER_PREFIX))
                 .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
     }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
 }
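
The reassignment loop above now leaves an unreachable owner in place when it is still the only registered candidate, since picking a new owner would just leave the entity without one. The decision, distilled into a sketch with plain collections standing in for the replicated state:

    import java.util.Map;
    import java.util.Set;

    final class ReassignmentSketch {
        private final Map<String, Set<String>> candidates;
        private final Set<String> activeMembers;

        ReassignmentSketch(final Map<String, Set<String>> candidates, final Set<String> activeMembers) {
            this.candidates = candidates;
            this.activeMembers = activeMembers;
        }

        boolean shouldReassign(final String entity, final String oldOwner) {
            final Set<String> forEntity = candidates.getOrDefault(entity, Set.of());
            // keep the unreachable owner if it is still the ONLY candidate:
            // reassigning would merely drop ownership without a replacement
            final boolean onlySurvivingCandidate = !activeMembers.contains(oldOwner)
                    && forEntity.contains(oldOwner)
                    && forEntity.size() == 1;
            return !onlySurvivingCandidate;
        }
    }
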
index 092f532dfbd06eb6a0097aef157627c4dfd19752..32a0a643695154247e1c396d84a54951af2f3c97 100644 (file)
@@ -11,7 +11,6 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.typed.ActorRef;
 import akka.actor.typed.Behavior;
-import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.actor.typed.javadsl.Receive;
@@ -29,6 +28,8 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterActivated;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
@@ -48,7 +49,7 @@ import org.slf4j.LoggerFactory;
  * Behavior that retrieves current candidates/owners from distributed-data and switches to OwnerSupervisor when the
  * sync has finished.
  */
-public final class OwnerSyncer extends AbstractBehavior<OwnerSupervisorCommand> {
+public final class OwnerSyncer extends AbstractSupervisor {
     private static final Logger LOG = LoggerFactory.getLogger(OwnerSyncer.class);
 
     private final ReplicatorMessageAdapter<OwnerSupervisorCommand, LWWRegister<String>> ownerReplicator;
@@ -72,8 +73,7 @@ public final class OwnerSyncer extends AbstractBehavior<OwnerSupervisorCommand>
 
         ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
 
-        new ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>>(context, replicator,
-            Duration.ofSeconds(5)).askGet(
+        candidateReplicator.askGet(
                 askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
                 InitialCandidateSync::new);
 
@@ -95,6 +95,8 @@ public final class OwnerSyncer extends AbstractBehavior<OwnerSupervisorCommand>
                 .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
                 .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
                 .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
                 .build();
     }
 
@@ -176,4 +178,9 @@ public final class OwnerSyncer extends AbstractBehavior<OwnerSupervisorCommand>
     private static void handleNotFoundOwnerRsp(final Replicator.NotFound<LWWRegister<String>> rsp) {
         LOG.debug("Owner not found. {}", rsp);
     }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
 }
@@ -5,22 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.eos.akka.registry.candidate.command;
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
 
 import akka.cluster.ddata.ORMap;
 import akka.cluster.ddata.ORSet;
 import akka.cluster.ddata.typed.javadsl.Replicator;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
 
-public class InitialCandidateSync extends CandidateRegistryCommand {
+public class ClearCandidates extends OwnerSupervisorCommand {
 
     private final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final ClearCandidatesForMember originalMessage;
 
-    public InitialCandidateSync(final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response) {
+    public ClearCandidates(final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                           final ClearCandidatesForMember originalMessage) {
         this.response = response;
+        this.originalMessage = originalMessage;
     }
 
     public Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
         return response;
     }
+
+    public ClearCandidatesForMember getOriginalMessage() {
+        return originalMessage;
+    }
 }
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java
new file mode 100644 (file)
index 0000000..1e27cb5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+
+/**
+ * Request sent from candidate registration actors to clear their candidate from all entities. Issued at startup to
+ * clear candidates registered by the previous incarnation of this node. The owner supervisor responds to this
+ * request to notify the registration actor that it can start up and process candidate requests.
+ */
+public class ClearCandidatesForMember extends OwnerSupervisorCommand implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ActorRef<ClearCandidatesResponse> replyTo;
+    private final String candidate;
+
+    public ClearCandidatesForMember(final ActorRef<ClearCandidatesResponse> replyTo, final String candidate) {
+        this.replyTo = replyTo;
+        this.candidate = candidate;
+    }
+
+    public ActorRef<ClearCandidatesResponse> getReplyTo() {
+        return replyTo;
+    }
+
+    public String getCandidate() {
+        return candidate;
+    }
+}
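
A minimal usage sketch of the request/response contract described in the javadoc above (not the project's actual wiring; the actor system, supervisor reference, member role and timeout below are illustrative assumptions):

// Sketch only: issue ClearCandidatesForMember via Akka Typed's ask pattern and obtain the
// ClearCandidatesResponse asynchronously once the owner supervisor has finished the removal.
import java.time.Duration;
import java.util.concurrent.CompletionStage;

import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.AskPattern;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;

final class ClearCandidatesAskSketch {
    static CompletionStage<ClearCandidatesResponse> clearPreviousCandidates(
            final ActorSystem<?> system, final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
            final String memberRole) {
        return AskPattern.ask(
            ownerSupervisor,
            replyTo -> new ClearCandidatesForMember(replyTo, memberRole),
            Duration.ofSeconds(5),
            system.scheduler());
    }
}
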
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java
new file mode 100644 (file)
index 0000000..7399bd8
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+/**
+ * Response sent from OwnerSupervisor to the ClearCandidatesForMember request, notifying the caller that removal has
+ * finished.
+ */
+public class ClearCandidatesResponse implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java
new file mode 100644 (file)
index 0000000..9f48323
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ClearCandidatesUpdateResponse extends OwnerSupervisorCommand {
+    private final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final ActorRef<ClearCandidatesResponse> replyTo;
+
+    public ClearCandidatesUpdateResponse(final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                                         final ActorRef<ClearCandidatesResponse> replyTo) {
+        this.response = response;
+        this.replyTo = replyTo;
+    }
+
+    public Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+
+    public ActorRef<ClearCandidatesResponse> getReplyTo() {
+        return replyTo;
+    }
+}
index 16c2ab625830d80f70560498925862816461b4a7..03ecbae10a7c418448eccaf3b7e00cd832e33c9d 100644 (file)
@@ -12,6 +12,7 @@ import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.actor.typed.javadsl.Receive;
+import akka.cluster.Cluster;
 import akka.cluster.ddata.Key;
 import akka.cluster.ddata.ORMap;
 import akka.cluster.ddata.ORMapKey;
@@ -20,6 +21,7 @@ import akka.cluster.ddata.SelfUniqueAddress;
 import akka.cluster.ddata.typed.javadsl.DistributedData;
 import akka.cluster.ddata.typed.javadsl.Replicator;
 import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.util.Set;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
@@ -35,10 +37,13 @@ public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryC
 
     private static final Logger LOG = LoggerFactory.getLogger(CandidateRegistry.class);
 
+    private static final String DATACENTER_PREFIX = "dc-";
+
     public static final Key<ORMap<DOMEntity, ORSet<String>>> KEY = new ORMapKey<>("candidateRegistry");
 
     private final ReplicatorMessageAdapter<CandidateRegistryCommand, ORMap<DOMEntity, ORSet<String>>> replicatorAdapter;
     private final SelfUniqueAddress node;
+    private final String selfRole;
 
     private CandidateRegistry(final ActorContext<CandidateRegistryCommand> context,
                               final ReplicatorMessageAdapter<CandidateRegistryCommand,
@@ -47,8 +52,9 @@ public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryC
         this.replicatorAdapter = replicatorAdapter;
 
         this.node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+        this.selfRole = extractRole(Cluster.get(context.getSystem()).selfMember().getRoles());
 
-        LOG.debug("Candidate registry started");
+        LOG.debug("{} : Candidate registry started", selfRole);
     }
 
     public static Behavior<CandidateRegistryCommand> create() {
@@ -69,7 +75,7 @@ public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryC
     }
 
     private Behavior<CandidateRegistryCommand> onRegisterCandidate(final RegisterCandidate registerCandidate) {
-        LOG.debug("Registering candidate({}) for entity: {}",
+        LOG.debug("{} - Registering candidate({}) for entity: {}", selfRole,
                 registerCandidate.getCandidate(), registerCandidate.getEntity());
         replicatorAdapter.askUpdate(
                 askReplyTo -> new Replicator.Update<>(
@@ -84,7 +90,7 @@ public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryC
     }
 
     private Behavior<CandidateRegistryCommand> onUnregisterCandidate(final UnregisterCandidate unregisterCandidate) {
-        LOG.debug("Removing candidate({}) from entity: {}",
+        LOG.debug("{} - Removing candidate({}) from entity: {}", selfRole,
                 unregisterCandidate.getCandidate(), unregisterCandidate.getEntity());
         replicatorAdapter.askUpdate(
                 askReplyTo -> new Replicator.Update<>(
@@ -99,7 +105,12 @@ public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryC
     }
 
     private Behavior<CandidateRegistryCommand> onInternalUpdateResponse(final InternalUpdateResponse updateResponse) {
-        LOG.debug("Received update response: {}", updateResponse.getRsp());
+        LOG.debug("{} : Received update response: {}", selfRole, updateResponse.getRsp());
         return this;
     }
+
+    private static String extractRole(final Set<String> roles) {
+        return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
+                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+    }
 }
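
The selfRole prefix added to the log statements above comes from extractRole(), which picks the first role that does not carry the "dc-" datacenter prefix. A small self-contained sketch of that convention, with illustrative role values:

// Sketch: the first non-datacenter role is taken as the member name ("dc-..." roles are skipped).
// The role values below are illustrative only.
import java.util.Set;

final class RoleExtractionSketch {
    private static final String DATACENTER_PREFIX = "dc-";

    static String extractRole(final Set<String> roles) {
        return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
    }

    public static void main(final String[] args) {
        // A member typically carries its own name plus a datacenter role.
        System.out.println(extractRole(Set.of("member-1", "dc-primary"))); // prints member-1
    }
}
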
index 34cfe78bad9fc915ae702137f40e0025e2de0b78..f9ca06896e67f3cd9e7418cec4aa3a2676d308c2 100644 (file)
@@ -7,28 +7,25 @@
  */
 package org.opendaylight.controller.eos.akka.registry.candidate;
 
+import akka.actor.typed.ActorRef;
 import akka.actor.typed.Behavior;
 import akka.actor.typed.javadsl.AbstractBehavior;
 import akka.actor.typed.javadsl.ActorContext;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.actor.typed.javadsl.Receive;
 import akka.actor.typed.javadsl.StashBuffer;
-import akka.cluster.ddata.ORMap;
-import akka.cluster.ddata.ORSet;
-import akka.cluster.ddata.SelfUniqueAddress;
-import akka.cluster.ddata.typed.javadsl.DistributedData;
-import akka.cluster.ddata.typed.javadsl.Replicator;
-import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
-import akka.cluster.typed.Cluster;
+import akka.cluster.Cluster;
 import java.time.Duration;
-import java.util.Map;
 import java.util.Set;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
-import org.opendaylight.controller.eos.akka.registry.candidate.command.InitialCandidateSync;
-import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFailed;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFinished;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RemovePreviousCandidates;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,91 +36,70 @@ public class CandidateRegistryInit extends AbstractBehavior<CandidateRegistryCom
     private static final String DATACENTER_PREFIX = "dc-";
 
     private final StashBuffer<CandidateRegistryCommand> stash;
-    private final ReplicatorMessageAdapter<CandidateRegistryCommand,
-            ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+    private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
     private final String selfRole;
-    private final SelfUniqueAddress node;
 
     public CandidateRegistryInit(final ActorContext<CandidateRegistryCommand> ctx,
                                  final StashBuffer<CandidateRegistryCommand> stash,
-                                 final ReplicatorMessageAdapter<CandidateRegistryCommand,
-                                         ORMap<DOMEntity, ORSet<String>>> candidateReplicator) {
+                                 final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
         super(ctx);
         this.stash = stash;
-        this.candidateReplicator = candidateReplicator;
-        selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles());
+        this.ownerSupervisor = ownerSupervisor;
+        this.selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles());
 
-        this.node = DistributedData.get(ctx.getSystem()).selfUniqueAddress();
+        ctx.getSelf().tell(new RemovePreviousCandidates());
 
-
-        this.candidateReplicator.askGet(
-                askReplyTo -> new Replicator.Get<>(
-                        CandidateRegistry.KEY,
-                        new Replicator.ReadAll(Duration.ofSeconds(15)), askReplyTo),
-                InitialCandidateSync::new);
-
-        LOG.debug("CandidateRegistry syncing behavior started.");
+        LOG.debug("{} : CandidateRegistry syncing behavior started.", selfRole);
     }
 
-    public static Behavior<CandidateRegistryCommand> create() {
+    public static Behavior<CandidateRegistryCommand> create(final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
         return Behaviors.withStash(100,
                 stash ->
-                        Behaviors.setup(ctx -> DistributedData.withReplicatorMessageAdapter(
-                                (ReplicatorMessageAdapter<CandidateRegistryCommand,
-                                        ORMap<DOMEntity, ORSet<String>>> replicatorAdapter) ->
-                                        new CandidateRegistryInit(ctx, stash, replicatorAdapter))));
+                        Behaviors.setup(ctx -> new CandidateRegistryInit(ctx, stash, ownerSupervisor)));
     }
 
     @Override
     public Receive<CandidateRegistryCommand> createReceive() {
         return newReceiveBuilder()
-                .onMessage(InitialCandidateSync.class, this::handleCandidateSync)
+                .onMessage(RemovePreviousCandidates.class, this::onRemoveCandidates)
+                .onMessage(CandidateRemovalFinished.class, command -> switchToCandidateRegistry())
+                .onMessage(CandidateRemovalFailed.class, this::candidateRemovalFailed)
                 .onMessage(RegisterCandidate.class, this::stashCommand)
                 .onMessage(UnregisterCandidate.class, this::stashCommand)
                 .build();
     }
 
-    private Behavior<CandidateRegistryCommand> stashCommand(final CandidateRegistryCommand command) {
-        stash.stash(command);
+    private Behavior<CandidateRegistryCommand> candidateRemovalFailed(final CandidateRemovalFailed command) {
+        LOG.warn("{} : Initial removal of candidates from previous iteration failed. Rescheduling.", selfRole,
+                command.getThrowable());
+        getContext().getSelf().tell(new RemovePreviousCandidates());
         return this;
     }
 
-    private Behavior<CandidateRegistryCommand> handleCandidateSync(final InitialCandidateSync command) {
-        final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = command.getResponse();
-        if (response instanceof Replicator.GetSuccess) {
-            clearExistingCandidates((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response);
-        }
-        // TODO implement other cases if needed, seems like only a retry would be needed here when we get a failure
-        // from distributed data
-        return switchToCandidateRegistry();
-    }
-
-    private void clearExistingCandidates(final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response) {
-        final Map<DOMEntity, ORSet<String>> entitiesToCandidates = response.get(response.key()).getEntries();
+    private Behavior<CandidateRegistryCommand> onRemoveCandidates(final RemovePreviousCandidates command) {
+        LOG.debug("Sending RemovePreviousCandidates.");
+        getContext().ask(ClearCandidatesResponse.class,
+                ownerSupervisor, Duration.ofSeconds(5),
+                ref -> new ClearCandidatesForMember(ref, selfRole),
+                (response, throwable) -> {
+                    if (response != null) {
+                        return new CandidateRemovalFinished();
+                    } else {
+                        return new CandidateRemovalFailed(throwable);
+                    }
+                });
 
-        for (Map.Entry<DOMEntity, ORSet<String>> entry : entitiesToCandidates.entrySet()) {
-            if (entry.getValue().getElements().contains(selfRole)) {
-                LOG.debug("Clearing candidate: {} from entity: {}, current state of entity candidates: {}",
-                        selfRole, entry.getKey(), entry.getValue().getElements());
-                clearRegistration(entry.getKey());
-            }
-        }
+        return this;
     }
 
-    private void clearRegistration(final DOMEntity entity) {
-        candidateReplicator.askUpdate(
-                askReplyTo -> new Replicator.Update<>(
-                        CandidateRegistry.KEY,
-                        ORMap.empty(),
-                        Replicator.writeLocal(),
-                        askReplyTo,
-                        map -> map.update(node, entity, ORSet.empty(),
-                                value -> value.remove(node, selfRole))),
-                InternalUpdateResponse::new);
+    private Behavior<CandidateRegistryCommand> stashCommand(final CandidateRegistryCommand command) {
+        LOG.debug("Stashing {}", command);
+        stash.stash(command);
+        return this;
     }
 
     private Behavior<CandidateRegistryCommand> switchToCandidateRegistry() {
-        LOG.debug("Clearing of candidates from previous instance done, switching to CandidateRegistry.");
+        LOG.debug("{} : Clearing of candidates from previous instance done, switching to CandidateRegistry.", selfRole);
         return stash.unstashAll(CandidateRegistry.create());
     }
 
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java
new file mode 100644 (file)
index 0000000..0410942
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public class CandidateRemovalFailed extends CandidateRegistryCommand {
+
+    private final Throwable throwable;
+
+    public CandidateRemovalFailed(final Throwable throwable) {
+        this.throwable = throwable;
+    }
+
+    public Throwable getThrowable() {
+        return throwable;
+    }
+}
@@ -5,5 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-@org.osgi.service.component.annotations.RequireServiceComponentRuntime
-package org.opendaylight.controller.config.yang.netty.threadgroup;
\ No newline at end of file
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public class CandidateRemovalFinished extends CandidateRegistryCommand {
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java
new file mode 100644 (file)
index 0000000..9e1da1e
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+/**
+ * Message sent by the candidate registry's initial behavior to itself to trigger, and on failure retrigger, the
+ * removal of candidates registered by the previous incarnation of this node.
+ */
+public class RemovePreviousCandidates extends CandidateRegistryCommand {
+}
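
Condensed into one place for readability, the trigger/retrigger loop this message drives looks roughly as follows (a sketch mirroring CandidateRegistryInit earlier in this change; stashing and the behavior switch are omitted, and the timeout value follows that class rather than being authoritative):

// Sketch of the removal handshake: ask the owner supervisor, map success to CandidateRemovalFinished
// and failure/timeout to CandidateRemovalFailed, whose handler re-sends RemovePreviousCandidates.
import java.time.Duration;

import akka.actor.typed.ActorRef;
import akka.actor.typed.javadsl.ActorContext;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFailed;
import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFinished;

final class RemovalRetrySketch {
    static void requestRemoval(final ActorContext<CandidateRegistryCommand> ctx,
            final ActorRef<OwnerSupervisorCommand> ownerSupervisor, final String selfRole) {
        ctx.ask(ClearCandidatesResponse.class, ownerSupervisor, Duration.ofSeconds(5),
            replyTo -> new ClearCandidatesForMember(replyTo, selfRole),
            (response, failure) -> response != null
                ? new CandidateRemovalFinished() : new CandidateRemovalFailed(failure));
    }
}
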
index 279ee8fa8ec88bf6d4d22dd150c6bed004dd7799..4419fdf4db4b6e85fce9f5abe6338c759f7a5b28 100644 (file)
@@ -24,9 +24,8 @@ import org.opendaylight.controller.eos.akka.registry.listener.owner.command.List
 import org.opendaylight.controller.eos.akka.registry.listener.owner.command.OwnerChanged;
 import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
 import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -99,9 +98,7 @@ public class SingleEntityListenerActor extends AbstractBehavior<ListenerCommand>
 
     private void triggerNoOwnerNotification() {
         LOG.debug("Triggering initial notification without an owner for: {}", entity);
-
-        toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange(
-                entity, EntityOwnershipChangeState.REMOTE_OWNERSHIP_LOST_NO_OWNER)));
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.REMOTE_OWNERSHIP_LOST_NO_OWNER, false));
     }
 
     private Behavior<ListenerCommand> onOwnerChanged(final OwnerChanged ownerChanged) {
@@ -133,8 +130,8 @@ public class SingleEntityListenerActor extends AbstractBehavior<ListenerCommand>
 
         currentOwner = newOwner;
 
-        toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange(
-                entity, EntityOwnershipChangeState.from(wasOwner, isOwner, hasOwner))));
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, isOwner, hasOwner),
+            false));
     }
 
     private void handleOwnerLost(final Replicator.Deleted<LWWRegister<String>> changed) {
@@ -143,7 +140,6 @@ public class SingleEntityListenerActor extends AbstractBehavior<ListenerCommand>
         LOG.debug("Owner lost for entity:{}, currentOwner: {}, wasOwner: {}", entity, currentOwner, wasOwner);
 
         currentOwner = "";
-        toNotify.tell(new EntityOwnerChanged(new DOMEntityOwnershipChange(
-                entity, EntityOwnershipChangeState.from(wasOwner, false, false))));
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, false, false), false));
     }
 }
index e97fe77a2f4ec062a18c6c868190ee3d3466892c..7e445c581c268f6adc544b11c2473c36bcae7f22 100644 (file)
@@ -110,8 +110,7 @@ public class EntityTypeListenerActor extends AbstractBehavior<TypeListenerComman
 
     private Behavior<TypeListenerCommand> onOwnerChanged(final EntityOwnerChanged rsp) {
         LOG.debug("{} : Entity-type: {} listener, owner change: {}", localMember, entityType, rsp);
-
-        listener.ownershipChanged(rsp.getOwnershipChange());
+        listener.ownershipChanged(rsp.entity(), rsp.change(), false);
         return this;
     }
 
index 02d0e2fe50fc364bece30e21d12a47baca6f12f9..ee0f54f4310425e23e41c10df122608575a29129 100644 (file)
@@ -10,25 +10,45 @@ package org.opendaylight.controller.eos.akka.registry.listener.type.command;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerActor;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
 
 /**
- * Notification sent to EntityTypeListenerActor when there is an owner change for an Entity of a given type.
+ * Notification sent to {@link EntityTypeListenerActor} when there is an owner change for an Entity of a given type.
  */
+@NonNullByDefault
 public final class EntityOwnerChanged extends TypeListenerCommand {
-    private final @NonNull DOMEntityOwnershipChange ownershipChange;
+    private final DOMEntity entity;
+    private final EntityOwnershipStateChange change;
+    private final boolean inJeopardy;
 
-    public EntityOwnerChanged(final DOMEntityOwnershipChange ownershipChange) {
-        this.ownershipChange = requireNonNull(ownershipChange);
+    public EntityOwnerChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+            final boolean inJeopardy) {
+        this.entity = requireNonNull(entity);
+        this.change = requireNonNull(change);
+        this.inJeopardy = inJeopardy;
     }
 
-    public @NonNull DOMEntityOwnershipChange getOwnershipChange() {
-        return ownershipChange;
+    public DOMEntity entity() {
+        return entity;
+    }
+
+    public EntityOwnershipStateChange change() {
+        return change;
+    }
+
+    public boolean inJeopardy() {
+        return inJeopardy;
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(this).add("ownershipChange", ownershipChange).toString();
+        return MoreObjects.toStringHelper(this)
+            .add("entity", entity)
+            .add("change", change)
+            .add("inJeopardy", inJeopardy)
+            .toString();
     }
 }
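
As a quick illustration of the notification's new shape (a sketch; the entity type and identifier below are made up for the example), a producer builds it from the entity and the computed state change, and a consumer reads it back through the entity()/change()/inJeopardy() accessors:

// Sketch: constructing and consuming the reworked EntityOwnerChanged notification.
import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

final class EntityOwnerChangedSketch {
    public static void main(final String[] args) {
        // Illustrative entity; real callers use their own type/identifier.
        final DOMEntity entity = new DOMEntity("sketch-type",
            YangInstanceIdentifier.of(QName.create("urn:example", "node")));

        // wasOwner=false, isOwner=true, hasOwner=true, inJeopardy=false
        final EntityOwnerChanged changed = new EntityOwnerChanged(entity,
            EntityOwnershipStateChange.from(false, true, true), false);

        // Listener-side consumption mirrors EntityTypeListenerActor.onOwnerChanged():
        // listener.ownershipChanged(changed.entity(), changed.change(), changed.inJeopardy());
        System.out.println(changed);
    }
}
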
index 27b4bcba84deb45a14db68bf4c906bc6d4e96b08..6adba42c098d8e65dba74a735cc88e821884c940 100644 (file)
@@ -19,6 +19,8 @@ import akka.actor.typed.javadsl.AskPattern;
 import akka.actor.typed.javadsl.Behaviors;
 import akka.cluster.ddata.LWWRegister;
 import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
 import akka.cluster.ddata.typed.javadsl.DistributedData;
 import akka.cluster.ddata.typed.javadsl.Replicator;
 import com.typesafe.config.Config;
@@ -44,17 +46,19 @@ import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReach
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
 import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
 import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
 import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
 import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
 import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
 import org.opendaylight.mdsal.binding.generator.impl.DefaultBindingRuntimeGenerator;
 import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeGenerator;
 import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -263,7 +267,7 @@ public abstract class AbstractNativeEosTest {
     }
 
     protected static void waitUntillOwnerPresent(final ClusterNode clusterNode, final DOMEntity entity) {
-        await().until(() -> {
+        await().atMost(Duration.ofSeconds(15)).until(() -> {
             final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
             final CompletionStage<Replicator.GetResponse<LWWRegister<String>>> ask =
                     AskPattern.ask(distributedData.replicator(),
@@ -284,6 +288,32 @@ public abstract class AbstractNativeEosTest {
         });
     }
 
+    protected static void waitUntillCandidatePresent(final ClusterNode clusterNode, final DOMEntity entity,
+                                                     final String candidate) {
+        await().atMost(Duration.ofSeconds(15)).until(() -> {
+            final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
+
+            final CompletionStage<Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>>> ask =
+                    AskPattern.ask(distributedData.replicator(),
+                            replyTo -> new Replicator.Get<>(
+                                    CandidateRegistry.KEY, Replicator.readLocal(), replyTo),
+                            Duration.ofSeconds(5),
+                            clusterNode.getActorSystem().scheduler());
+
+            final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response =
+                    ask.toCompletableFuture().get(5, TimeUnit.SECONDS);
+
+            if (response instanceof Replicator.GetSuccess) {
+                final Map<DOMEntity, ORSet<String>> entries =
+                        ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response).dataValue().getEntries();
+
+                return entries.get(entity).contains(candidate);
+
+            }
+            return false;
+        });
+    }
+
     protected static CompletableFuture<OwnerSupervisorReply> activateDatacenter(final ClusterNode clusterNode) {
         final CompletionStage<OwnerSupervisorReply> ask =
                 AskPattern.ask(clusterNode.getOwnerSupervisor(),
@@ -306,14 +336,14 @@ public abstract class AbstractNativeEosTest {
                                               final boolean hasOwner, final boolean isOwner, final boolean wasOwner) {
         await().until(() -> !listener.getChanges().isEmpty());
 
-        await().untilAsserted(() -> {
-            final List<DOMEntityOwnershipChange> changes = listener.getChanges();
-            final DOMEntityOwnershipChange domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1);
-            assertEquals(entity, domEntityOwnershipChange.getEntity());
+        await().atMost(Duration.ofSeconds(10)).untilAsserted(() -> {
+            final var changes = listener.getChanges();
+            final var domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1);
+            assertEquals(entity, domEntityOwnershipChange.entity());
 
-            assertEquals(hasOwner, domEntityOwnershipChange.getState().hasOwner());
-            assertEquals(isOwner, domEntityOwnershipChange.getState().isOwner());
-            assertEquals(wasOwner, domEntityOwnershipChange.getState().wasOwner());
+            assertEquals(hasOwner, domEntityOwnershipChange.change().hasOwner());
+            assertEquals(isOwner, domEntityOwnershipChange.change().isOwner());
+            assertEquals(wasOwner, domEntityOwnershipChange.change().wasOwner());
         });
     }
 
@@ -386,11 +416,9 @@ public abstract class AbstractNativeEosTest {
     }
 
     protected static final class MockEntityOwnershipListener implements DOMEntityOwnershipListener {
-
-        private final Logger log;
-
-        private final List<DOMEntityOwnershipChange> changes = new ArrayList<>();
+        private final List<EntityOwnerChanged> changes = new ArrayList<>();
         private final String member;
+        private final Logger log;
 
         public MockEntityOwnershipListener(final String member) {
             log = LoggerFactory.getLogger("EOS-listener-" + member);
@@ -398,13 +426,15 @@ public abstract class AbstractNativeEosTest {
         }
 
         @Override
-        public void ownershipChanged(final DOMEntityOwnershipChange ownershipChange) {
-            log.info("{} Received ownershipCHanged: {}", member, ownershipChange);
+        public void ownershipChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+                final boolean inJeopardy) {
+            final var changed = new EntityOwnerChanged(entity, change, inJeopardy);
+            log.info("{} Received ownershipCHanged: {}", member, changed);
             log.info("{} changes: {}", member, changes.size());
-            changes.add(ownershipChange);
+            changes.add(changed);
         }
 
-        public List<DOMEntityOwnershipChange> getChanges() {
+        public List<EntityOwnerChanged> getChanges() {
             return changes;
         }
 
index 199e7931bb1e8a6234302a91ff2e1b23f0f3c970..652102f903508a58b5e2b4c595835f5efe29a827 100644 (file)
@@ -26,6 +26,7 @@ import akka.cluster.ddata.typed.javadsl.DistributedData;
 import akka.cluster.ddata.typed.javadsl.Replicator;
 import com.typesafe.config.ConfigFactory;
 import java.time.Duration;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.CompletionStage;
@@ -40,24 +41,19 @@ import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry
 import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
 import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
@@ -75,7 +71,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
     @Before
     public void setUp() throws Exception {
         system = ActorSystem.create("ClusterSystem", ConfigFactory.load());
-        typedSystem = Adapter.toTyped(this.system);
+        typedSystem = Adapter.toTyped(system);
         replicator = DistributedData.get(typedSystem).replicator();
 
         service = new AkkaEntityOwnershipService(system, CODEC_CONTEXT);
@@ -92,9 +88,9 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
         final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
 
-        final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
+        final Registration reg = service.registerCandidate(entity);
+        assertNotNull(reg);
 
-        verifyEntityOwnershipCandidateRegistration(entity, reg);
         verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
 
         try {
@@ -106,9 +102,9 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
         }
 
         final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId);
-        final DOMEntityOwnershipCandidateRegistration reg2 = service.registerCandidate(entity2);
+        final Registration reg2 = service.registerCandidate(entity2);
 
-        verifyEntityOwnershipCandidateRegistration(entity2, reg2);
+        assertNotNull(reg2);
         verifyEntityCandidateRegistered(ENTITY_TYPE2, entityId, "member-1");
     }
 
@@ -117,9 +113,9 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
         final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
 
-        final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
+        final Registration reg = service.registerCandidate(entity);
+        assertNotNull(reg);
 
-        verifyEntityOwnershipCandidateRegistration(entity, reg);
         verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
 
         reg.close();
@@ -136,13 +132,11 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
         final MockEntityOwnershipListener listener = new MockEntityOwnershipListener("member-1");
 
-        final DOMEntityOwnershipListenerRegistration reg = service.registerListener(entity.getType(), listener);
+        final Registration reg = service.registerListener(entity.getType(), listener);
 
         assertNotNull("EntityOwnershipListenerRegistration null", reg);
-        assertEquals("getEntityType", entity.getType(), reg.getEntityType());
-        assertEquals("getInstance", listener, reg.getInstance());
 
-        final DOMEntityOwnershipCandidateRegistration candidate = service.registerCandidate(entity);
+        final Registration candidate = service.registerCandidate(entity);
 
         verifyListenerState(listener, entity, true, true, false);
         final int changes = listener.getChanges().size();
@@ -161,7 +155,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
     public void testGetOwnershipState() throws Exception {
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, "one");
 
-        final DOMEntityOwnershipCandidateRegistration registration = service.registerCandidate(entity);
+        final Registration registration = service.registerCandidate(entity);
         verifyGetOwnershipState(service, entity, EntityOwnershipState.IS_OWNER);
 
         final RunningContext runningContext = service.getRunningContext();
@@ -193,7 +187,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
 
     @Test
     public void testEntityRetrievalWithYiid() throws Exception {
-        final YangInstanceIdentifier entityId = YangInstanceIdentifier.create(new NodeIdentifier(NetworkTopology.QNAME),
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
                 new NodeIdentifier(Topology.QNAME),
                 NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
                 new NodeIdentifier(Node.QNAME),
@@ -201,50 +195,49 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
 
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
 
-        final DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
+        final Registration reg = service.registerCandidate(entity);
 
-        verifyEntityOwnershipCandidateRegistration(entity, reg);
+        assertNotNull(reg);
         verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
 
-        RpcResult<GetEntityOutput> getEntityResult = service.getEntity(new GetEntityInputBuilder()
-                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
-                .setType(new EntityType(ENTITY_TYPE))
-                .build())
-                .get();
+        var result = service.getEntity(new GetEntityInputBuilder()
+            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build())
+            .get()
+            .getResult();
 
-        assertEquals(getEntityResult.getResult().getOwnerNode().getValue(), "member-1");
-        assertEquals(getEntityResult.getResult().getCandidateNodes().get(0).getValue(), "member-1");
+        assertEquals(result.getOwnerNode().getValue(), "member-1");
+        assertEquals(result.getCandidateNodes().get(0).getValue(), "member-1");
 
         // we should not be able to retrieve the entity when using string
         final String entityPathEncoded =
                 "/network-topology:network-topology/topology[topology-id='test']/node[node-id='test://test-node']";
 
-        getEntityResult = service.getEntity(new GetEntityInputBuilder()
-                .setName(new EntityName(entityPathEncoded))
-                .setType(new EntityType(ENTITY_TYPE))
-                .build())
-                .get();
-
-        assertNull(getEntityResult.getResult().getOwnerNode());
-        assertTrue(getEntityResult.getResult().getCandidateNodes().isEmpty());
-
-        final GetEntitiesOutput getEntitiesResult =
-                service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
-
-        assertEquals(getEntitiesResult.getEntities().size(), 1);
-        assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey(
-                new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)), new EntityType(ENTITY_TYPE)))
-                .getCandidateNodes().contains(new NodeName("member-1")));
-        assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey(
+        result = service.getEntity(new GetEntityInputBuilder()
+            .setName(new EntityName(entityPathEncoded))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build())
+            .get()
+            .getResult();
+
+        assertNull(result.getOwnerNode());
+        assertEquals(List.of(), result.getCandidateNodes());
+
+        final var getEntitiesResult = service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+        final var entities = getEntitiesResult.nonnullEntities();
+        assertEquals(1, entities.size());
+        assertTrue(entities.get(new EntitiesKey(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+            new EntityType(ENTITY_TYPE))).getCandidateNodes().contains(new NodeName("member-1")));
+        assertTrue(entities.get(new EntitiesKey(
                         new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
                         new EntityType(ENTITY_TYPE)))
                 .getOwnerNode().getValue().equals("member-1"));
 
-        final GetEntityOwnerOutput getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder()
-                        .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
-                        .setType(new EntityType(ENTITY_TYPE))
-                        .build())
-                .get().getResult();
+        final var getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder()
+            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build()).get().getResult();
 
         assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1");
     }
@@ -252,9 +245,7 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
     private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity,
                                                 final EntityOwnershipState expState) {
         await().atMost(Duration.ofSeconds(5)).untilAsserted(() -> {
-            final Optional<EntityOwnershipState> state = service.getOwnershipState(entity);
-            assertTrue("getOwnershipState present", state.isPresent());
-            assertEquals("EntityOwnershipState", expState, state.get());
+            assertEquals(Optional.of(expState), service.getOwnershipState(entity));
         });
     }
 
@@ -310,10 +301,4 @@ public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
 
         return success.get(CandidateRegistry.KEY).getEntries();
     }
-
-    private static void verifyEntityOwnershipCandidateRegistration(final DOMEntity entity,
-                                                                   final DOMEntityOwnershipCandidateRegistration reg) {
-        assertNotNull("EntityOwnershipCandidateRegistration null", reg);
-        assertEquals("getInstance", entity, reg.getInstance());
-    }
-}
\ No newline at end of file
+}
index 28566c7f15631a501aa92dde306d633171aba038..e4927ca67b7e123c501161f77dd0baca8e38ce0f 100644 (file)
@@ -60,7 +60,6 @@ public class DataCentersTest extends AbstractNativeEosTest {
     public void testDatacenterActivation() throws Exception {
         registerCandidates(node1, ENTITY_1, "member-1");
         registerCandidates(node3, ENTITY_1, "member-3");
-        registerCandidates(node4, ENTITY_1, "member-4");
 
         activateDatacenter(node1).get();
 
@@ -82,18 +81,15 @@ public class DataCentersTest extends AbstractNativeEosTest {
         verifyListenerState(listener1, ENTITY_1, true, false, false);
         verifyListenerState(listener2, ENTITY_1, true, true, false);
 
+        registerCandidates(node4, ENTITY_1, "member-4");
         unregisterCandidates(node3, ENTITY_1, "member-3");
 
         // checking index after notif so current + 1
         verifyListenerState(listener1, ENTITY_1, true, false, false);
-        verifyListenerState(listener2, ENTITY_1, true, false, true);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
 
         deactivateDatacenter(node3).get();
         activateDatacenter(node2).get();
-
-        // no candidate in dc-primary so no owners after datacenter activation
-        verifyListenerState(listener1, ENTITY_1, false, false, false);
-        verifyListenerState(listener2, ENTITY_1, false, false, false);
     }
 
     @Test
@@ -102,9 +98,13 @@ public class DataCentersTest extends AbstractNativeEosTest {
         registerCandidates(node3, ENTITY_1, "member-3");
         registerCandidates(node4, ENTITY_1, "member-4");
 
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-1");
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-3");
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-4");
+
         activateDatacenter(node1).get();
 
-        waitUntillOwnerPresent(node1, ENTITY_1);
+        waitUntillOwnerPresent(node4, ENTITY_1);
         final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
         verifyListenerState(listener1, ENTITY_1, true, true, false);
 
@@ -122,6 +122,7 @@ public class DataCentersTest extends AbstractNativeEosTest {
         activateDatacenter(node3).get();
         verifyListenerState(listener2, ENTITY_1, true, true, false);
 
+        waitUntillOwnerPresent(node3, ENTITY_1);
         unregisterCandidates(node3, ENTITY_1, "member-3");
         verifyListenerState(listener2, ENTITY_1, true, false, true);
     }
index 3e43be1b4950a215c14b73fdcd5516c8e98ed5b8..28991c1e515ec6a366459e1c0d65be8390f792a1 100644 (file)
@@ -26,22 +26,18 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
@@ -64,7 +60,7 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest {
         service2 = new AkkaEntityOwnershipService(system2, CODEC_CONTEXT);
 
         // need to wait until all nodes are ready
-        final Cluster cluster = Cluster.get(Adapter.toTyped(system2));
+        final var cluster = Cluster.get(Adapter.toTyped(system2));
         Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
             final List<Member> members = new ArrayList<>();
             cluster.state().getMembers().forEach(members::add);
@@ -97,7 +93,7 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest {
      */
     @Test
     public void testEntityRetrievalWithUnavailableSupervisor() throws Exception {
-        final YangInstanceIdentifier entityId = YangInstanceIdentifier.create(new NodeIdentifier(NetworkTopology.QNAME),
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
                 new NodeIdentifier(Topology.QNAME),
                 NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
                 new NodeIdentifier(Node.QNAME),
@@ -105,14 +101,13 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest {
 
         final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
 
-        final DOMEntityOwnershipCandidateRegistration reg = service1.registerCandidate(entity);
+        final Registration reg = service1.registerCandidate(entity);
 
         await().untilAsserted(() -> {
-            final RpcResult<GetEntityOutput> getEntityResult = service1.getEntity(new GetEntityInputBuilder()
-                            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
-                            .setType(new EntityType(ENTITY_TYPE))
-                            .build())
-                    .get();
+            final var getEntityResult = service1.getEntity(new GetEntityInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get();
 
             assertEquals(getEntityResult.getResult().getOwnerNode().getValue(), "member-1");
             assertEquals(getEntityResult.getResult().getCandidateNodes().get(0).getValue(), "member-1");
@@ -121,37 +116,34 @@ public class EntityRpcHandlerTest extends AbstractNativeEosTest {
         // keep this under ask timeout to make sure the singleton actor in the inactive datacenter responds with failure
         // immediately, so that the rpc actor retries with distributed-data asap
         await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
-            final GetEntitiesOutput getEntitiesResult =
-                    service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
-
-            assertEquals(getEntitiesResult.getEntities().size(), 1);
-            assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey(
-                            new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
-                            new EntityType(ENTITY_TYPE)))
-                    .getCandidateNodes().contains(new NodeName("member-1")));
-            assertTrue(getEntitiesResult.getEntities().get(new EntitiesKey(
-                            new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
-                            new EntityType(ENTITY_TYPE)))
-                    .getOwnerNode().getValue().equals("member-1"));
+            final var getEntitiesResult = service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+            final var entities = getEntitiesResult.nonnullEntities();
+            assertEquals(1, entities.size());
+            assertTrue(entities.get(new EntitiesKey(
+                new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+                new EntityType(ENTITY_TYPE)))
+                .getCandidateNodes().contains(new NodeName("member-1")));
+            assertTrue(entities.get(new EntitiesKey(
+                new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+                new EntityType(ENTITY_TYPE)))
+                .getOwnerNode().getValue().equals("member-1"));
         });
 
         await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
-            final GetEntityOutput getEntityResult = service2.getEntity(new GetEntityInputBuilder()
-                            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
-                            .setType(new EntityType(ENTITY_TYPE))
-                            .build())
-                    .get().getResult();
+            final var getEntityResult = service2.getEntity(new GetEntityInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get().getResult();
 
             assertEquals(getEntityResult.getOwnerNode().getValue(), "member-1");
             assertEquals(getEntityResult.getCandidateNodes().get(0).getValue(), "member-1");
         });
 
         await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
-            final GetEntityOwnerOutput getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder()
-                            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
-                            .setType(new EntityType(ENTITY_TYPE))
-                            .build())
-                    .get().getResult();
+            final var getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get().getResult();
 
             assertEquals(getOwnerResult.getOwnerNode().getValue(), "member-1");
         });
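
The reworked assertions above rely on Awaitility's untilAsserted(), which simply retries the enclosed assertion until it passes or the timeout expires. A minimal, self-contained sketch of that pattern follows; it is an illustration only, and the AwaitilitySketch class and its owner field are hypothetical rather than project code.

    import static org.awaitility.Awaitility.await;
    import static org.junit.Assert.assertEquals;

    import java.time.Duration;
    import java.util.concurrent.atomic.AtomicReference;

    class AwaitilitySketch {
        // Hypothetical asynchronous source of the current owner name.
        private final AtomicReference<String> owner = new AtomicReference<>("unknown");

        void example() {
            // Simulate the owner becoming known on another thread.
            new Thread(() -> owner.set("member-1")).start();

            // Retry the assertion until it passes or two seconds elapse.
            await().atMost(Duration.ofSeconds(2))
                .untilAsserted(() -> assertEquals("member-1", owner.get()));
        }
    }
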
index 71fffe12baf65854e43fc279db8a30836651da1d..7699799ba235b19f5be67c66dfebfa23a0eb0bb9 100644 (file)
@@ -223,6 +223,38 @@ public class ThreeNodeReachabilityTest extends AbstractNativeEosTest {
         verifyListenerState(node1Listener, ENTITY_1, true, false, false);
     }
 
+    @Test
+    public void testOwnerNotReassignedWhenOnlyCandidate() throws Exception {
+        startNode3();
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+        final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+        verifyNoNotifications(listener1);
+        verifyNoNotifications(listener2);
+
+        registerCandidates(node3, ENTITY_1, "member-3");
+        waitUntillOwnerPresent(node1, ENTITY_1);
+
+        MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener3, ENTITY_1, true, true, false);
+
+        ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        startNode3();
+        verifyListenerState(listener1, ENTITY_1, false, false, false);
+
+        listener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener3, ENTITY_1, false, false, false);
+
+        registerCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+        verifyListenerState(listener3, ENTITY_1, true, false, false);
+    }
+
     private void startNode3() throws Exception {
         startNode3(3);
     }
@@ -232,7 +265,7 @@ public class ThreeNodeReachabilityTest extends AbstractNativeEosTest {
 
         // need to wait until all nodes are ready
         final Cluster cluster = Cluster.get(node2.getActorSystem());
-        await().atMost(Duration.ofSeconds(20)).until(() -> {
+        await().atMost(Duration.ofSeconds(30)).until(() -> {
             final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
             if (members.size() != membersPresent) {
                 return false;
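
The timeout bump above gives slower environments more headroom while the cluster converges. As a rough, hedged sketch of the same readiness check written as a standalone helper (the ClusterReadiness class, the awaitMembersUp name and the expected-count parameter are invented for illustration):

    import static org.awaitility.Awaitility.await;

    import akka.actor.ActorSystem;
    import akka.cluster.Cluster;
    import akka.cluster.Member;
    import akka.cluster.MemberStatus;
    import java.time.Duration;

    final class ClusterReadiness {
        private ClusterReadiness() {
            // utility class
        }

        // Poll the classic Cluster state until the expected number of members report Up.
        static void awaitMembersUp(final ActorSystem system, final int expected) {
            final Cluster cluster = Cluster.get(system);
            await().atMost(Duration.ofSeconds(30)).until(() -> {
                int up = 0;
                for (Member member : cluster.state().getMembers()) {
                    if (MemberStatus.up().equals(member.status())) {
                        up++;
                    }
                }
                return up == expected;
            });
        }
    }
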
index 90bfb88b20ffaf91fcfd3db8ba5581e978d32d53..f544ed1a4ae2641dddc75dcddad4b94165efe937 100644 (file)
@@ -28,11 +28,10 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.eos.akka.AbstractNativeEosTest;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
-import org.opendaylight.mdsal.singleton.dom.impl.DOMClusterSingletonServiceProviderImpl;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.impl.EOSClusterSingletonServiceProvider;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,9 +43,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
     private MockNativeEntityOwnershipService node2;
     private MockNativeEntityOwnershipService node3;
 
-    private MockSingletonService singletonNode1;
-    private MockSingletonService singletonNode2;
-    private MockSingletonService singletonNode3;
+    private EOSClusterSingletonServiceProvider singletonNode1;
+    private EOSClusterSingletonServiceProvider singletonNode2;
+    private EOSClusterSingletonServiceProvider singletonNode3;
 
 
     @Before
@@ -55,14 +54,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
         node2 = startupNativeService(2551, List.of("member-2"), THREE_NODE_SEED_NODES);
         node3 = startupNativeService(2552, List.of("member-3"), THREE_NODE_SEED_NODES);
 
-        singletonNode1 = new MockSingletonService(node1);
-        singletonNode1.initializeProvider();
-
-        singletonNode2 = new MockSingletonService(node2);
-        singletonNode2.initializeProvider();
-
-        singletonNode3 = new MockSingletonService(node3);
-        singletonNode3.initializeProvider();
+        singletonNode1 = new EOSClusterSingletonServiceProvider(node1);
+        singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
+        singletonNode3 = new EOSClusterSingletonServiceProvider(node3);
 
         waitUntillNodeReady(node3);
     }
@@ -90,8 +84,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
     @Test
     public void testSingletonOwnershipHandoff() {
         final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1");
-        final ClusterSingletonServiceRegistration registration =
-                singletonNode1.registerClusterSingletonService(service);
+        final Registration registration = singletonNode1.registerClusterSingletonService(service);
 
         verifyServiceActive(service);
 
@@ -108,14 +101,12 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
     @Test
     public void testSingletonOwnershipHandoffOnNodeShutdown() throws Exception {
         MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
-        ClusterSingletonServiceRegistration registration2 =
-                singletonNode2.registerClusterSingletonService(service2);
+        Registration registration2 = singletonNode2.registerClusterSingletonService(service2);
 
         verifyServiceActive(service2);
 
         final MockClusterSingletonService service3 = new MockClusterSingletonService("member-3", "service-1");
-        final ClusterSingletonServiceRegistration registration3 =
-                singletonNode3.registerClusterSingletonService(service3);
+        final Registration registration3 = singletonNode3.registerClusterSingletonService(service3);
 
         verifyServiceInactive(service3, 2);
 
@@ -124,8 +115,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
         verifyServiceActive(service3);
 
         node2 = startupNativeService(2551, List.of("member-1"), THREE_NODE_SEED_NODES);
-        singletonNode2 = new MockSingletonService(node2);
-        singletonNode2.initializeProvider();
+        singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
 
         waitUntillNodeReady(node2);
         service2 = new MockClusterSingletonService("member-2", "service-1");
@@ -135,7 +125,7 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
         verifyServiceInactive(service2, 5);
     }
 
-    private void waitUntillNodeReady(MockNativeEntityOwnershipService node) {
+    private static void waitUntillNodeReady(final MockNativeEntityOwnershipService node) {
         // need to wait until all nodes are ready
         final Cluster cluster = Cluster.get(Adapter.toTyped(node.getActorSystem()));
         Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
@@ -155,19 +145,19 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
         });
     }
 
-    private static void verifyServiceActive(MockClusterSingletonService service) {
+    private static void verifyServiceActive(final MockClusterSingletonService service) {
         await().untilAsserted(() -> assertTrue(service.isActivated()));
     }
 
-    private static void verifyServiceActive(MockClusterSingletonService service, long delay) {
+    private static void verifyServiceActive(final MockClusterSingletonService service, final long delay) {
         await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(service.isActivated()));
     }
 
-    private static void verifyServiceInactive(MockClusterSingletonService service) {
+    private static void verifyServiceInactive(final MockClusterSingletonService service) {
         await().untilAsserted(() -> assertFalse(service.isActivated()));
     }
 
-    private static void verifyServiceInactive(MockClusterSingletonService service, long delay) {
+    private static void verifyServiceInactive(final MockClusterSingletonService service, final long delay) {
         await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertFalse(service.isActivated()));
     }
 
@@ -177,9 +167,9 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
         private final ServiceGroupIdentifier identifier;
         private boolean activated = false;
 
-        MockClusterSingletonService(String member, String identifier) {
+        MockClusterSingletonService(final String member, final String identifier) {
             this.member = member;
-            this.identifier = ServiceGroupIdentifier.create(identifier);
+            this.identifier = new ServiceGroupIdentifier(identifier);
         }
 
         @Override
@@ -204,10 +194,4 @@ public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
             return activated;
         }
     }
-
-    private static class MockSingletonService extends DOMClusterSingletonServiceProviderImpl {
-        MockSingletonService(DOMEntityOwnershipService entityOwnershipService) {
-            super(entityOwnershipService);
-        }
-    }
 }
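
The registrations above are now plain org.opendaylight.yangtools.concepts.Registration handles instead of service-specific registration types. A small, self-contained sketch of that contract, assuming yangtools' AbstractRegistration base class behaves as described (illustrative only, not project code):

    import org.opendaylight.yangtools.concepts.AbstractRegistration;

    final class RegistrationSketch {
        public static void main(final String[] args) {
            // Registration is AutoCloseable, so try-with-resources is the natural cleanup pattern.
            try (AbstractRegistration reg = new AbstractRegistration() {
                @Override
                protected void removeRegistration() {
                    // runs exactly once, when the registration is closed
                    System.out.println("registration closed");
                }
            }) {
                System.out.println("still open: " + !reg.isClosed());
            }
        }
    }
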
index ff23633e5e5793b7f0de0c9938e6c70122a0a266..08c2a36fb0a752c88df30cb726769b9ab1a6bf9b 100644 (file)
@@ -31,7 +31,11 @@ akka {
         # This value controls how quickly Entity Ownership Service decisions are
         # propagated within a node.
         notify-subscribers-interval = 20 ms
-      }
+    }
+    split-brain-resolver {
+      active-strategy = keep-majority
+      stable-after = 7s
+    }
   }
 }
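
The new split-brain-resolver block enables Akka's keep-majority downing strategy after a 7-second stable period. For readers who want to sanity-check such a fragment outside Akka, here is a hedged sketch using the Typesafe Config API; the SbrConfigSketch class and the standalone parsing are purely illustrative.

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    final class SbrConfigSketch {
        public static void main(final String[] args) {
            // Parse the same HOCON fragment added above and read the values back.
            final Config config = ConfigFactory.parseString(
                "akka.cluster.split-brain-resolver {\n"
                + "  active-strategy = keep-majority\n"
                + "  stable-after = 7s\n"
                + "}\n");

            System.out.println(config.getString("akka.cluster.split-brain-resolver.active-strategy"));
            System.out.println(config.getDuration("akka.cluster.split-brain-resolver.stable-after"));
        }
    }
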
 
index 159c507c2798b86994354a7cacda2dcac622be15..77e751dd0e4bb94a38d39022d3d840cde52c3d80 100644 (file)
@@ -12,13 +12,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-it-base</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencyManagement>
@@ -26,7 +26,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <dependency>
         <groupId>org.opendaylight.mdsal</groupId>
         <artifactId>mdsal-artifacts</artifactId>
-        <version>8.0.10</version>
+        <version>13.0.1</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
@@ -104,7 +104,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     </dependency>
     <dependency>
         <groupId>org.osgi</groupId>
-        <artifactId>osgi.core</artifactId>
+        <artifactId>org.osgi.framework</artifactId>
         <scope>compile</scope>
     </dependency>
     <dependency>
index 3cce222809f96bef69f2290eabbcb5c8322e2323..3ea3d8e4a026aab70959b55271bedc4a618f6961 100644 (file)
@@ -13,13 +13,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-it-parent</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
@@ -37,7 +37,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>controller-artifacts</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
@@ -91,26 +91,25 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     </dependency>
     <dependency>
         <groupId>org.osgi</groupId>
-        <artifactId>osgi.core</artifactId>
-    </dependency>
-    <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
+        <artifactId>org.osgi.framework</artifactId>
     </dependency>
 
-    <!-- Testing Dependencies -->
+    <!--
+        Unfortunately default mockito-inline does not work in OSGi.
+        See https://github.com/mockito/mockito/issues/2203#issuecomment-926372053
+      -->
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
+      <version>4.11.0</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
+
   <build>
     <plugins>
       <plugin>
           <artifactId>maven-surefire-plugin</artifactId>
-              <!-- Overridden to have TCP channel support -->
-          <version>3.0.0-M5</version>
           <configuration>
               <!-- Overridden to fix corruption, where the process would hang after test -->
               <forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory"/>
@@ -177,20 +176,20 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         <artifactId>maven-dependency-plugin</artifactId>
         <executions>
           <execution>
-           <id>unpack-karaf-resources</id>
-           <goals>
-            <goal>unpack-dependencies</goal>
-           </goals>
-           <phase>process-test-resources</phase>
-           <configuration>
-            <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
-            <groupId>org.opendaylight.controller</groupId>
-            <includeArtifactIds>mockito-core,objenesis,mdsal-it-base</includeArtifactIds>
-            <excludes>META-INF\/**</excludes>
-            <ignorePermissions>false</ignorePermissions>
-           </configuration>
+            <id>unpack-karaf-resources</id>
+            <goals>
+              <goal>unpack-dependencies</goal>
+            </goals>
+            <phase>process-test-resources</phase>
+            <configuration>
+              <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
+              <groupId>org.opendaylight.controller</groupId>
+              <includeArtifactIds>mockito-core,byte-buddy,objenesis,mdsal-it-base</includeArtifactIds>
+              <excludes>META-INF\/**</excludes>
+              <ignorePermissions>false</ignorePermissions>
+            </configuration>
           </execution>
-         </executions>
+        </executions>
       </plugin>
     </plugins>
   </build>
index 32703e58ec7b81fa17b068e75510b4a6b41c06ac..2cd8d0ac1bdb5c90799694b41a02e850fc2ca529 100644 (file)
   <parent>
     <groupId>org.opendaylight.mdsal</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>8.0.10</version>
+    <version>13.0.1</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-parent</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <dependencyManagement>
@@ -25,7 +25,7 @@
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>bundle-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
index bc453ae1471c6e272ab80f4f53e4578f896345a0..86f5e203f2323fdbf6b8c551a0431ac01243a09d 100644 (file)
@@ -5,13 +5,13 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-aggregator</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
index e9dd05a1772e46519b7bfc74d672bcc1551e83d8..295d0d0552a6fbad351d43e89deb126c85439f7d 100644 (file)
@@ -11,7 +11,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft</artifactId>
index 17d6f980cd3d1836b763f160186e7acdb1b7f54a..052a48940c2cbb8db823832565d8a006c0c07d51 100644 (file)
@@ -29,8 +29,8 @@ import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 import org.opendaylight.controller.cluster.raft.behaviors.Leader;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.util.AbstractStringIdentifier;
 
@@ -68,10 +68,8 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
         if (message instanceof KeyValue) {
             if (isLeader()) {
                 persistData(getSender(), new PayloadIdentifier(persistIdentifier++), (Payload) message, false);
-            } else {
-                if (getLeader() != null) {
-                    getLeader().forward(message, getContext());
-                }
+            } else if (getLeader() != null) {
+                getLeader().forward(message, getContext());
             }
 
         } else if (message instanceof PrintState) {
@@ -83,7 +81,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
         } else if (message instanceof PrintRole) {
             if (LOG.isDebugEnabled()) {
                 if (getRaftState() == RaftState.Leader || getRaftState() == RaftState.IsolatedLeader) {
-                    final String followers = ((Leader)this.getCurrentBehavior()).printFollowerStates();
+                    final String followers = ((Leader)getCurrentBehavior()).printFollowerStates();
                     LOG.debug("{} = {}, Peers={}, followers={}", getId(), getRaftState(),
                         getRaftActorContext().getPeerIds(), followers);
                 } else {
@@ -106,7 +104,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     public Optional<ActorRef> createRoleChangeNotifier(final String actorId) {
-        ActorRef exampleRoleChangeNotifier = this.getContext().actorOf(
+        ActorRef exampleRoleChangeNotifier = getContext().actorOf(
             RoleChangeNotifier.getProps(actorId), actorId + "-notifier");
         return Optional.<ActorRef>of(exampleRoleChangeNotifier);
     }
@@ -118,8 +116,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
     @Override
     protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
-        if (data instanceof KeyValue) {
-            KeyValue kv = (KeyValue) data;
+        if (data instanceof KeyValue kv) {
             state.put(kv.getKey(), kv.getValue());
             if (clientActor != null) {
                 clientActor.tell(new KeyValueSaved(), getSelf());
@@ -132,7 +129,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
         try {
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize((Serializable) state, installSnapshotStream.get());
+                SerializationUtils.serialize((Serializable) state, installSnapshotStream.orElseThrow());
             }
         } catch (RuntimeException e) {
             LOG.error("Exception in creating snapshot", e);
@@ -202,7 +199,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
         try {
             return new MapState((Map<String, String>) SerializationUtils.deserialize(snapshotBytes.read()));
         } catch (IOException e) {
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
     }
 
index 65d2109b30277f2a70efea3c4a3b219c8efa9b9f..312615671391e8db04eb33c0e0d51c452cf0202a 100644 (file)
@@ -19,7 +19,7 @@ public class ExampleConfigParamsImpl extends DefaultConfigParamsImpl {
     }
 
     @Override
-    public int getSnapshotChunkSize() {
+    public int getMaximumMessageSliceSize() {
         return 50;
     }
 }
index 6ef8a07d9dfa9891c2c0805536be75be143c3415..9559f1cff422c75fc116ffbfa321338eda320175 100644 (file)
@@ -35,8 +35,9 @@ public class LogGenerator {
     }
 
     public static class LoggingThread implements Runnable {
-
+        private final Random random = new Random();
         private final ActorRef clientActor;
+
         private volatile boolean stopLogging = false;
 
         public LoggingThread(final ActorRef clientActor) {
@@ -45,7 +46,6 @@ public class LogGenerator {
 
         @Override
         public void run() {
-            Random random = new Random();
             while (true) {
                 if (stopLogging) {
                     LOG.info("Logging stopped for client: {}", clientActor.path());
diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java
new file mode 100644 (file)
index 0000000..7721a8b
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.example.messages;
+
+import java.io.Serializable;
+
+final class KVv1 implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final String key;
+    private final String value;
+
+    KVv1(String key, String value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    Object readResolve() {
+        return new KeyValue(key, value);
+    }
+}
index 520188b8cd4dd91ecc8a4649ab5e3dab42744443..78eea5cd862a8f26dd7d1b73df156266f58fe8e3 100644 (file)
@@ -5,21 +5,20 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example.messages;
 
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
-public class KeyValue extends Payload implements Serializable {
+public final class KeyValue extends Payload {
     private static final long serialVersionUID = 1L;
+
     private String key;
     private String value;
 
     public KeyValue() {
     }
 
-    public KeyValue(String key, String value) {
+    public KeyValue(final String key, final String value) {
         this.key = key;
         this.value = value;
     }
@@ -32,12 +31,15 @@ public class KeyValue extends Payload implements Serializable {
         return value;
     }
 
-    public void setKey(String key) {
-        this.key = key;
+    @Override
+    public int size() {
+        return value.length() + key.length();
     }
 
-    public void setValue(String value) {
-        this.value = value;
+    @Override
+    public int serializedSize() {
+        // Should be a better estimate
+        return size();
     }
 
     @Override
@@ -46,8 +48,7 @@ public class KeyValue extends Payload implements Serializable {
     }
 
     @Override
-    public int size() {
-        return this.value.length() + this.key.length();
+    protected Object writeReplace() {
+        return new KVv1(key, value);
     }
-
 }
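
KeyValue now serializes through the KVv1 proxy introduced above: writeReplace() substitutes the proxy on the wire, and the proxy's readResolve() turns it back into a KeyValue on deserialization. The following is a generic sketch of that writeReplace()/readResolve() pattern with invented Box/BoxProxy classes, not the project's code.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    final class Box implements Serializable {
        private static final long serialVersionUID = 1L;

        final String content;

        Box(final String content) {
            this.content = content;
        }

        // Invoked by serialization instead of writing Box's own fields.
        private Object writeReplace() {
            return new BoxProxy(content);
        }
    }

    final class BoxProxy implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String content;

        BoxProxy(final String content) {
            this.content = content;
        }

        // Invoked after the proxy has been deserialized; returns the real object.
        private Object readResolve() {
            return new Box(content);
        }
    }

    final class ProxyDemo {
        public static void main(final String[] args) throws IOException, ClassNotFoundException {
            final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(new Box("hello"));     // actually writes a BoxProxy
            }
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                final Box box = (Box) in.readObject(); // readResolve() turned the proxy back into a Box
                System.out.println(box.content);
            }
        }
    }
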
index 4a84542846de89560c2feadcae61c175289210e3..306e7561a567c488017a62a3928708599b7970c7 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-clustering-commons</artifactId>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-lang3</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.core</artifactId>
-    </dependency>
 
     <!-- Test Dependencies -->
     <dependency>
       <artifactId>commons-io</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <build>
index 64506ee6867fedd656b190f420be7ac7ec44c9b9..53d317fba1148d7c10093d443d4e0ea7a80dfbeb 100644 (file)
@@ -43,7 +43,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
         this.snapshotTerm = snapshotTerm;
         this.logContext = logContext;
 
-        this.journal = new ArrayList<>(unAppliedEntries.size());
+        journal = new ArrayList<>(unAppliedEntries.size());
         for (ReplicatedLogEntry entry: unAppliedEntries) {
             append(entry);
         }
@@ -168,7 +168,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
         long totalSize = 0;
         for (int i = fromIndex; i < toIndex; i++) {
             ReplicatedLogEntry entry = journal.get(i);
-            totalSize += entry.size();
+            totalSize += entry.serializedSize();
             if (totalSize <= maxDataSize) {
                 retList.add(entry);
             } else {
index 0f14844d5666e5ee6f73f3d7cb005b607c41b3a7..c69decdd14d8578dcba92d608ca55018a8b34b21 100644 (file)
@@ -5,33 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft;
 
 import akka.actor.ActorRef;
 import org.opendaylight.yangtools.concepts.Identifier;
 
-public interface ClientRequestTracker {
-    /**
-     * Returns the client actor that should be sent a response when consensus is achieved.
-     *
-     * @return the client actor
-     */
-    ActorRef getClientActor();
-
-    /**
-     * Returns the identifier of the object that is to be replicated. For example a transaction identifier in the case
-     * of a transaction.
-     *
-     * @return the identifier
-     */
-    Identifier getIdentifier();
-
-    /**
-     * Returns the index of the log entry that is to be replicated.
-     *
-     * @return the index
-     */
-    long getIndex();
+/**
+ * Consensus forwarding tracker.
+ *
+ * @param clientActor the client actor that should be sent a response when consensus is achieved
+ * @param identifier the identifier of the object that is to be replicated. For example a transaction identifier in the
+ *        case of a transaction
+ * @param logIndex the index of the log entry that is to be replicated
+ */
+public record ClientRequestTracker(long logIndex, ActorRef clientActor, Identifier identifier) {
 
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java
deleted file mode 100644 (file)
index 6ffb922..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft;
-
-import akka.actor.ActorRef;
-import org.opendaylight.yangtools.concepts.Identifier;
-
-public class ClientRequestTrackerImpl implements ClientRequestTracker {
-
-    private final ActorRef clientActor;
-    private final Identifier identifier;
-    private final long logIndex;
-
-    public ClientRequestTrackerImpl(ActorRef clientActor, Identifier identifier, long logIndex) {
-
-        this.clientActor = clientActor;
-
-        this.identifier = identifier;
-
-        this.logIndex = logIndex;
-    }
-
-    @Override
-    public ActorRef getClientActor() {
-        return clientActor;
-    }
-
-    @Override
-    public long getIndex() {
-        return logIndex;
-    }
-
-    @Override
-    public Identifier getIdentifier() {
-        return identifier;
-    }
-}
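
The change above collapses an interface plus a mutable implementation class into a single Java record. As a generic sketch of what the record gives for free, with String components standing in for the real ActorRef and Identifier types:

    // Illustration only: String components stand in for ActorRef and Identifier.
    record Tracker(long logIndex, String clientActor, String identifier) {
    }

    final class TrackerDemo {
        public static void main(final String[] args) {
            final Tracker tracker = new Tracker(42, "client-1", "tx-1");
            // Records generate the constructor, accessors, equals(), hashCode() and toString();
            // accessors use the component names directly, e.g. logIndex() rather than getIndex().
            System.out.println(tracker.logIndex() + " -> " + tracker.identifier());
            System.out.println(tracker);
        }
    }
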
index 8351374d60fe0e73bb5ca2fbfa2ea01cbd5a3f52..7f98295a300c6861e78fe383713801aada7d62ca 100644 (file)
@@ -87,7 +87,7 @@ public interface ConfigParams {
      *
      * @return the maximum size (in bytes).
      */
-    int getSnapshotChunkSize();
+    int getMaximumMessageSliceSize();
 
     /**
      * Returns the maximum number of journal log entries to batch on recovery before applying.
index 37ed729bed3e4edaf8f5f013c6a497edb8ba7eb6..c83f90ec430e9d4ec0228b53e65b4cd9d9d726b4 100644 (file)
@@ -41,7 +41,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
      */
     private static final int ELECTION_TIME_MAX_VARIANCE = 100;
 
-    private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+    private static final int MAXIMUM_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
 
 
     /**
@@ -72,7 +72,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     // 0 means direct threshold if disabled
     private int snapshotDataThreshold = 0;
 
-    private int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+    private int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
 
     private long electionTimeoutFactor = 2;
     private long candidateElectionTimeoutDivisor = 1;
@@ -95,9 +95,9 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         this.snapshotBatchCount = snapshotBatchCount;
     }
 
-    public void setRecoverySnapshotIntervalSeconds(int recoverySnapshotInterval) {
+    public void setRecoverySnapshotIntervalSeconds(final int recoverySnapshotInterval) {
         checkArgument(recoverySnapshotInterval >= 0);
-        this.recoverySnapshotIntervalSeconds = recoverySnapshotInterval;
+        recoverySnapshotIntervalSeconds = recoverySnapshotInterval;
     }
 
     public void setSnapshotDataThresholdPercentage(final int snapshotDataThresholdPercentage) {
@@ -108,8 +108,8 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         this.snapshotDataThreshold = snapshotDataThreshold;
     }
 
-    public void setSnapshotChunkSize(final int snapshotChunkSize) {
-        this.snapshotChunkSize = snapshotChunkSize;
+    public void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
+        this.maximumMessageSliceSize = maximumMessageSliceSize;
     }
 
     public void setJournalRecoveryLogBatchSize(final int journalRecoveryLogBatchSize) {
@@ -163,7 +163,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
 
     @Override
     public int getRecoverySnapshotIntervalSeconds() {
-        return this.recoverySnapshotIntervalSeconds;
+        return recoverySnapshotIntervalSeconds;
     }
 
     @Override
@@ -191,8 +191,8 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     }
 
     @Override
-    public int getSnapshotChunkSize() {
-        return snapshotChunkSize;
+    public int getMaximumMessageSliceSize() {
+        return maximumMessageSliceSize;
     }
 
     @Override
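
The rename above turns the snapshot-specific "chunk size" into a general maximum message slice size, with a smaller 480 KiB default. A brief usage sketch against the setter and getter shown in this hunk; the surrounding SliceSizeSketch class and the override value are illustrative only.

    import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;

    final class SliceSizeSketch {
        public static void main(final String[] args) {
            final DefaultConfigParamsImpl params = new DefaultConfigParamsImpl();
            // Override the new 480 KiB default if the transport supports larger frames.
            params.setMaximumMessageSliceSize(1024 * 1024);
            System.out.println(params.getMaximumMessageSliceSize());
        }
    }
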
index a76d6a29c272db22c34ff66c23769384305f19e9..f5c94fbf4cb26aaf8d61f42f7804b9d660d664fd 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 import static java.util.Objects.requireNonNull;
 
@@ -42,11 +43,8 @@ public final class FollowerLogInformation {
 
     private short payloadVersion = -1;
 
-    // Assume the HELIUM_VERSION version initially for backwards compatibility until we obtain the follower's
-    // actual version via AppendEntriesReply. Although we no longer support the Helium version, a pre-Boron
-    // follower will not have the version field in AppendEntriesReply so it will be set to 0 which is
-    // HELIUM_VERSION.
-    private short raftVersion = RaftVersions.HELIUM_VERSION;
+    // Assume FLUORINE_VERSION initially, as we no longer support pre-Fluorine versions.
+    private short raftVersion = RaftVersions.FLUORINE_VERSION;
 
     private final PeerInfo peerInfo;
 
@@ -65,7 +63,7 @@ public final class FollowerLogInformation {
      */
     @VisibleForTesting
     FollowerLogInformation(final PeerInfo peerInfo, final long matchIndex, final RaftActorContext context) {
-        this.nextIndex = context.getCommitIndex();
+        nextIndex = context.getCommitIndex();
         this.matchIndex = matchIndex;
         this.context = context;
         this.peerInfo = requireNonNull(peerInfo);
@@ -299,6 +297,7 @@ public final class FollowerLogInformation {
      * @param raftVersion the raft version.
      */
     public void setRaftVersion(final short raftVersion) {
+        checkArgument(raftVersion >= RaftVersions.FLUORINE_VERSION, "Unexpected version %s", raftVersion);
         this.raftVersion = raftVersion;
     }
 
@@ -317,8 +316,8 @@ public final class FollowerLogInformation {
      * @param state the LeaderInstallSnapshotState
      */
     public void setLeaderInstallSnapshotState(final @NonNull LeaderInstallSnapshotState state) {
-        if (this.installSnapshotState == null) {
-            this.installSnapshotState = requireNonNull(state);
+        if (installSnapshotState == null) {
+            installSnapshotState = requireNonNull(state);
         }
     }
 
index d1e0b5c286e609259a5837d3c4e1c08667478caf..6febb902517d08d21076b6f02fc8003b214bbaba 100644 (file)
@@ -29,7 +29,7 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Thomas Pantelis
  */
-class GetSnapshotReplyActor extends UntypedAbstractActor {
+final class GetSnapshotReplyActor extends UntypedAbstractActor {
     private static final Logger LOG = LoggerFactory.getLogger(GetSnapshotReplyActor.class);
 
     private final Params params;
@@ -88,7 +88,7 @@ class GetSnapshotReplyActor extends UntypedAbstractActor {
             this.replyToActor = requireNonNull(replyToActor);
             this.receiveTimeout = requireNonNull(receiveTimeout);
             this.id = requireNonNull(id);
-            this.peerInformation = peerInfo;
+            peerInformation = peerInfo;
         }
     }
 }
index 00f6a04346fddb28f525a0dbb9113652ad71aae0..d71d879a5c6016140615233479411b0a4242a105 100644 (file)
@@ -15,11 +15,12 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.actor.Status;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import com.google.common.annotations.VisibleForTesting;
-import java.util.ArrayList;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
@@ -50,12 +51,12 @@ import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.NoopPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.concepts.Immutable;
 
@@ -100,8 +101,7 @@ import org.opendaylight.yangtools.concepts.Immutable;
  * </ul>
  */
 public abstract class RaftActor extends AbstractUntypedPersistentActor {
-
-    private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
+    private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50);
 
     /**
      * This context should NOT be passed directly to any other actor it is
@@ -123,16 +123,16 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
     private boolean shuttingDown;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected RaftActor(final String id, final Map<String, String> peerAddresses,
          final Optional<ConfigParams> configParams, final short payloadVersion) {
 
         persistentProvider = new PersistentDataProvider(this);
         delegatingPersistenceProvider = new RaftActorDelegatingPersistentDataProvider(null, persistentProvider);
 
-        context = new RaftActorContextImpl(this.getSelf(),
-            this.getContext(), id, new ElectionTermImpl(persistentProvider, id, LOG),
-            -1, -1, peerAddresses,
-            configParams.isPresent() ? configParams.get() : new DefaultConfigParamsImpl(),
+        context = new RaftActorContextImpl(getSelf(), getContext(), id,
+            new ElectionTermImpl(persistentProvider, id, LOG), -1, -1, peerAddresses,
+            configParams.isPresent() ? configParams.orElseThrow() : new DefaultConfigParamsImpl(),
             delegatingPersistenceProvider, this::handleApplyState, LOG, this::executeInSelf);
 
         context.setPayloadVersion(payloadVersion);
@@ -225,9 +225,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         if (snapshotSupport.handleSnapshotMessage(message, getSender())) {
             return;
         }
-        if (message instanceof ApplyState) {
-            ApplyState applyState = (ApplyState) message;
-
+        if (message instanceof ApplyState applyState) {
             if (!hasFollowers()) {
                 // for single node, the capture should happen after the apply state
                 // as we delete messages from the persistent journal which have made it to the snapshot
@@ -239,35 +237,38 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             }
 
             possiblyHandleBehaviorMessage(message);
-        } else if (message instanceof ApplyJournalEntries) {
-            ApplyJournalEntries applyEntries = (ApplyJournalEntries) message;
+        } else if (message instanceof ApplyJournalEntries applyEntries) {
             LOG.debug("{}: Persisting ApplyJournalEntries with index={}", persistenceId(), applyEntries.getToIndex());
 
             persistence().persistAsync(applyEntries, NoopProcedure.instance());
-
         } else if (message instanceof FindLeader) {
-            getSender().tell(
-                new FindLeaderReply(getLeaderAddress()),
-                getSelf()
-            );
+            getSender().tell(new FindLeaderReply(getLeaderAddress()), getSelf());
         } else if (message instanceof GetOnDemandRaftState) {
             onGetOnDemandRaftStats();
         } else if (message instanceof InitiateCaptureSnapshot) {
             captureSnapshot();
-        } else if (message instanceof SwitchBehavior) {
-            switchBehavior((SwitchBehavior) message);
-        } else if (message instanceof LeaderTransitioning) {
-            onLeaderTransitioning((LeaderTransitioning)message);
+        } else if (message instanceof SwitchBehavior switchBehavior) {
+            switchBehavior(switchBehavior);
+        } else if (message instanceof LeaderTransitioning leaderTransitioning) {
+            onLeaderTransitioning(leaderTransitioning);
         } else if (message instanceof Shutdown) {
             onShutDown();
-        } else if (message instanceof Runnable) {
-            ((Runnable)message).run();
-        } else if (message instanceof NoopPayload) {
-            persistData(null, null, (NoopPayload) message, false);
-        } else if (message instanceof RequestLeadership) {
-            onRequestLeadership((RequestLeadership) message);
+        } else if (message instanceof Runnable runnable) {
+            runnable.run();
+        } else if (message instanceof NoopPayload noopPayload) {
+            persistData(null, null, noopPayload, false);
+        } else if (message instanceof RequestLeadership requestLeadership) {
+            onRequestLeadership(requestLeadership);
         } else if (!possiblyHandleBehaviorMessage(message)) {
-            handleNonRaftCommand(message);
+            if (message instanceof JournalProtocol.Response response
+                && delegatingPersistenceProvider.handleJournalResponse(response)) {
+                LOG.debug("{}: handled a journal response", persistenceId());
+            } else if (message instanceof SnapshotProtocol.Response response
+                && delegatingPersistenceProvider.handleSnapshotResponse(response)) {
+                LOG.debug("{}: handled a snapshot response", persistenceId());
+            } else {
+                handleNonRaftCommand(message);
+            }
         }
     }
 
@@ -412,7 +413,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         Optional<ActorRef> roleChangeNotifier = getRoleChangeNotifier();
         if (getRaftState() == RaftState.Follower && roleChangeNotifier.isPresent()
                 && leaderTransitioning.getLeaderId().equals(getCurrentBehavior().getLeaderId())) {
-            roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), null,
+            roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), null,
                 getCurrentBehavior().getLeaderPayloadVersion()), getSelf());
         }
     }
@@ -451,7 +452,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
 
         final RaftActorBehavior currentBehavior = context.getCurrentBehavior();
-        OnDemandRaftState.AbstractBuilder<?, ?> builder = newOnDemandRaftStateBuilder()
+        final var builder = newOnDemandRaftStateBuilder()
                 .commitIndex(context.getCommitIndex())
                 .currentTerm(context.getTermInformation().getCurrentTerm())
                 .inMemoryJournalDataSize(replicatedLog().dataSize())
@@ -477,19 +478,14 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             builder.lastLogTerm(lastLogEntry.getTerm());
         }
 
-        if (getCurrentBehavior() instanceof AbstractLeader) {
-            AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
-            Collection<String> followerIds = leader.getFollowerIds();
-            List<FollowerInfo> followerInfoList = new ArrayList<>(followerIds.size());
-            for (String id: followerIds) {
-                final FollowerLogInformation info = leader.getFollower(id);
-                followerInfoList.add(new FollowerInfo(id, info.getNextIndex(), info.getMatchIndex(),
-                        info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
-                            TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
-                        context.getPeerInfo(info.getId()).isVoting()));
-            }
-
-            builder.followerInfoList(followerInfoList);
+        if (getCurrentBehavior() instanceof AbstractLeader leader) {
+            builder.followerInfoList(leader.getFollowerIds().stream()
+                .map(leader::getFollower)
+                .map(info -> new FollowerInfo(info.getId(), info.getNextIndex(), info.getMatchIndex(),
+                    info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
+                        TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
+                    context.getPeerInfo(info.getId()).isVoting()))
+                .collect(ImmutableList.toImmutableList()));
         }
 
         sender().tell(builder.build(), self());
@@ -516,7 +512,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         if (!Objects.equals(lastLeaderId, currentBehavior.getLeaderId())
                 || oldBehaviorState.getLeaderPayloadVersion() != currentBehavior.getLeaderPayloadVersion()) {
             if (roleChangeNotifier.isPresent()) {
-                roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
+                roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
                         currentBehavior.getLeaderPayloadVersion()), getSelf());
             }
 
@@ -533,7 +529,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         if (roleChangeNotifier.isPresent()
                 && (oldBehavior == null || oldBehavior.state() != currentBehavior.state())) {
-            roleChangeNotifier.get().tell(new RoleChanged(getId(), oldBehaviorStateName ,
+            roleChangeNotifier.orElseThrow().tell(new RoleChanged(getId(), oldBehaviorStateName ,
                     currentBehavior.state().name()), getSelf());
         }
     }
@@ -625,8 +621,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         if (wasAppended && hasFollowers()) {
             // Send log entry for replication.
-            getCurrentBehavior().handleMessage(getSelf(), new Replicate(clientActor, identifier, replicatedLogEntry,
-                    !batchHint));
+            getCurrentBehavior().handleMessage(getSelf(),
+                new Replicate(replicatedLogEntry.getIndex(), !batchHint, clientActor, identifier));
         }
     }
 
@@ -963,7 +959,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             this.lastValidLeaderId = lastValidLeaderId;
             this.lastLeaderId = lastLeaderId;
             this.behavior = requireNonNull(behavior);
-            this.leaderPayloadVersion = behavior.getLeaderPayloadVersion();
+            leaderPayloadVersion = behavior.getLeaderPayloadVersion();
         }
 
         @Override
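
Most of the RaftActor changes above swap explicit instanceof-plus-cast pairs for Java 16+ pattern matching. A standalone sketch of the idiom, with arbitrary message types:

    final class PatternMatchSketch {
        static String describe(final Object message) {
            // The pattern variable is bound only when the instanceof test succeeds,
            // replacing the separate cast used by the removed code above.
            if (message instanceof String text) {
                return "text of length " + text.length();
            } else if (message instanceof Runnable runnable) {
                runnable.run();
                return "ran a task";
            } else {
                return "unhandled: " + message;
            }
        }

        public static void main(final String[] args) {
            System.out.println(describe("hello"));
            System.out.println(describe((Runnable) () -> System.out.println("task")));
        }
    }
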
index 8ba0f48d72e9ca8c5825f657bdc5ec9940365c17..a27bb9c395688b4c99177d942a4dcd11a1ae5b76 100644 (file)
@@ -16,14 +16,12 @@ import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.cluster.Cluster;
 import com.google.common.annotations.VisibleForTesting;
-import java.util.ArrayList;
+import com.google.common.collect.ImmutableList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Set;
 import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.LongSupplier;
@@ -110,7 +108,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         this.lastApplied = lastApplied;
         this.configParams = requireNonNull(configParams);
         this.persistenceProvider = requireNonNull(persistenceProvider);
-        this.log = requireNonNull(logger);
+        log = requireNonNull(logger);
         this.applyStateConsumer = requireNonNull(applyStateConsumer);
 
         fileBackedOutputStreamFactory = new FileBackedOutputStreamFactory(
@@ -219,7 +217,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public Logger getLogger() {
-        return this.log;
+        return log;
     }
 
     @Override
@@ -256,34 +254,27 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public void updatePeerIds(final ServerConfigurationPayload serverConfig) {
-        votingMember = true;
-        boolean foundSelf = false;
-        Set<String> currentPeers = new HashSet<>(this.getPeerIds());
-        for (ServerInfo server : serverConfig.getServerConfig()) {
-            if (getId().equals(server.getId())) {
-                foundSelf = true;
-                if (!server.isVoting()) {
-                    votingMember = false;
-                }
+        boolean newVotingMember = false;
+        var currentPeers = new HashSet<>(getPeerIds());
+        for (var server : serverConfig.getServerConfig()) {
+            if (getId().equals(server.peerId())) {
+                newVotingMember = server.isVoting();
             } else {
-                VotingState votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
-                if (!currentPeers.contains(server.getId())) {
-                    this.addToPeers(server.getId(), null, votingState);
+                final var votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
+                if (currentPeers.contains(server.peerId())) {
+                    getPeerInfo(server.peerId()).setVotingState(votingState);
+                    currentPeers.remove(server.peerId());
                 } else {
-                    this.getPeerInfo(server.getId()).setVotingState(votingState);
-                    currentPeers.remove(server.getId());
+                    addToPeers(server.peerId(), null, votingState);
                 }
             }
         }
 
         for (String peerIdToRemove : currentPeers) {
-            this.removePeer(peerIdToRemove);
-        }
-
-        if (!foundSelf) {
-            votingMember = false;
+            removePeer(peerIdToRemove);
         }
 
+        votingMember = newVotingMember;
         log.debug("{}: Updated server config: isVoting: {}, peers: {}", id, votingMember, peerInfoMap.values());
 
         setDynamicServerConfigurationInUse();
@@ -367,7 +358,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public void setDynamicServerConfigurationInUse() {
-        this.dynamicServerConfiguration = true;
+        dynamicServerConfiguration = true;
     }
 
     @Override
@@ -375,9 +366,9 @@ public class RaftActorContextImpl implements RaftActorContext {
         if (!isDynamicServerConfigurationInUse()) {
             return null;
         }
-        Collection<PeerInfo> peers = getPeers();
-        List<ServerInfo> newConfig = new ArrayList<>(peers.size() + 1);
-        for (PeerInfo peer: peers) {
+        final var peers = getPeers();
+        final var newConfig = ImmutableList.<ServerInfo>builderWithExpectedSize(peers.size() + (includeSelf ? 1 : 0));
+        for (PeerInfo peer : peers) {
             newConfig.add(new ServerInfo(peer.getId(), peer.isVoting()));
         }
 
@@ -385,7 +376,7 @@ public class RaftActorContextImpl implements RaftActorContext {
             newConfig.add(new ServerInfo(getId(), votingMember));
         }
 
-        return new ServerConfigurationPayload(newConfig);
+        return new ServerConfigurationPayload(newConfig.build());
     }
 
     @Override
@@ -413,7 +404,7 @@ public class RaftActorContextImpl implements RaftActorContext {
     }
 
     void setCurrentBehavior(final RaftActorBehavior behavior) {
-        this.currentBehavior = requireNonNull(behavior);
+        currentBehavior = requireNonNull(behavior);
     }
 
     @Override
index 0bd86382607f61b827e14a25dc8f86e15f11b70d..846ef22bb08c9cec938f02d9317f76684319188a 100644 (file)
@@ -13,8 +13,7 @@ import akka.japi.Procedure;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 
 /**
  * The DelegatingPersistentDataProvider used by RaftActor to override the configured persistent provider to
@@ -42,33 +41,19 @@ class RaftActorDelegatingPersistentDataProvider extends DelegatingPersistentData
     }
 
     private <T> void doPersist(final T entry, final Procedure<T> procedure, final boolean async) {
-        if (getDelegate().isRecoveryApplicable()) {
-            persistSuper(entry, procedure, async);
-        } else {
-            if (entry instanceof ReplicatedLogEntry) {
-                Payload payload = ((ReplicatedLogEntry)entry).getData();
-                if (payload instanceof PersistentPayload) {
-                    // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes
-                    // on recovery if data persistence is later enabled.
-                    if (async) {
-                        persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
-                    } else {
-                        persistentProvider.persist(payload, p -> procedure.apply(entry));
-                    }
-                } else {
-                    persistSuper(entry, procedure, async);
-                }
+        if (!getDelegate().isRecoveryApplicable() && entry instanceof ReplicatedLogEntry replicatedLogEntry
+            && replicatedLogEntry.getData() instanceof PersistentPayload payload) {
+            // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes on recovery
+            // if data persistence is later enabled.
+            if (async) {
+                persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
             } else {
-                persistSuper(entry, procedure, async);
+                persistentProvider.persist(payload, p -> procedure.apply(entry));
             }
-        }
-    }
-
-    private <T> void persistSuper(final T object, final Procedure<T> procedure, final boolean async) {
-        if (async) {
-            super.persistAsync(object, procedure);
+        } else if (async) {
+            super.persistAsync(entry, procedure);
         } else {
-            super.persist(object, procedure);
+            super.persist(entry, procedure);
         }
     }
 }
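
The rewritten doPersist() collapses the nested branches into one Java 16 pattern-matching condition. A hedged sketch of the same dispatch shape; the Store interface and field names here are placeholders, not the controller's DataPersistenceProvider API:

    interface Payload { }
    interface PersistentPayload extends Payload { }
    interface LogEntry { Payload data(); }

    final class PersistDispatchSketch {
        interface Store { void persist(Object obj); }

        private final Store journal;          // the normal (possibly non-persisting) delegate
        private final Store persistentStore;  // the always-on persistent provider
        private final boolean recoveryApplicable;

        PersistDispatchSketch(final Store journal, final Store persistentStore, final boolean recoveryApplicable) {
            this.journal = journal;
            this.persistentStore = persistentStore;
            this.recoveryApplicable = recoveryApplicable;
        }

        void persist(final Object entry) {
            // Persist only the payload when regular persistence is disabled, so journal indexes stay
            // gap-free if persistence is enabled later -- the same rule the comment above describes.
            if (!recoveryApplicable && entry instanceof LogEntry logEntry
                    && logEntry.data() instanceof PersistentPayload payload) {
                persistentStore.persist(payload);
            } else {
                journal.persist(entry);
            }
        }
    }
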
index c3d5af55cd401d2e66507d61983446a5e7e1b58b..3aeaff6d89f95876db6cbc365b39b017bcc134ef 100644 (file)
@@ -89,7 +89,7 @@ public class RaftActorLeadershipTransferCohort {
 
         Optional<ActorRef> roleChangeNotifier = raftActor.getRoleChangeNotifier();
         if (roleChangeNotifier.isPresent()) {
-            roleChangeNotifier.get().tell(raftActor.newLeaderStateChanged(context.getId(), null,
+            roleChangeNotifier.orElseThrow().tell(raftActor.newLeaderStateChanged(context.getId(), null,
                     currentBehavior.getLeaderPayloadVersion()), raftActor.self());
         }
 
@@ -122,9 +122,9 @@ public class RaftActorLeadershipTransferCohort {
     void doTransfer() {
         RaftActorBehavior behavior = raftActor.getCurrentBehavior();
         // Sanity check...
-        if (behavior instanceof Leader) {
+        if (behavior instanceof Leader leader) {
             isTransferring = true;
-            ((Leader)behavior).transferLeadership(this);
+            leader.transferLeadership(this);
         } else {
             LOG.debug("{}: No longer the leader - skipping transfer", raftActor.persistenceId());
             finish(true);
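
The Optional.get() to orElseThrow() changes in this file (and several below) are behaviour-preserving: orElseThrow() throws the same NoSuchElementException on an empty Optional, but names the expectation instead of hiding it. A tiny illustration, not tied to the classes above:

    import java.util.Optional;

    final class OptionalSketch {
        // Identical semantics to maybeLeader.get(), but reads as "I expect a value here".
        static String leaderIdOrFail(final Optional<String> maybeLeader) {
            return maybeLeader.orElseThrow();
        }
    }
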
index 1f9b93acd7e507f3f837f550697b1fde8eb67ee5..4df0e7b58b5eb4aff84a03ac21f4865987b7c264 100644 (file)
@@ -8,8 +8,8 @@
 package org.opendaylight.controller.cluster.raft;
 
 import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
  * Interface for a class that participates in raft actor persistence recovery.
index 10375f9406666234bb3f6f69d14cb2ea003ef7f8..389e8dfd8ff942a090a9201fa5432b944402d9d8 100644 (file)
@@ -14,6 +14,7 @@ import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
 import org.opendaylight.controller.cluster.raft.persisted.EmptyState;
@@ -22,7 +23,6 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPay
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
 import org.slf4j.Logger;
 
 /**
@@ -46,7 +46,7 @@ class RaftActorRecoverySupport {
     RaftActorRecoverySupport(final RaftActorContext context, final RaftActorRecoveryCohort cohort) {
         this.context = context;
         this.cohort = cohort;
-        this.log = context.getLogger();
+        log = context.getLogger();
     }
 
     boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) {
@@ -59,19 +59,18 @@ class RaftActorRecoverySupport {
         }
 
         boolean recoveryComplete = false;
-        if (message instanceof UpdateElectionTerm) {
-            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
-                    ((UpdateElectionTerm) message).getVotedFor());
-        } else if (message instanceof SnapshotOffer) {
-            onRecoveredSnapshot((SnapshotOffer) message);
-        } else if (message instanceof ReplicatedLogEntry) {
-            onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
-        } else if (message instanceof ApplyJournalEntries) {
-            onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
-        } else if (message instanceof DeleteEntries) {
-            onDeleteEntries((DeleteEntries) message);
-        } else if (message instanceof ServerConfigurationPayload) {
-            context.updatePeerIds((ServerConfigurationPayload)message);
+        if (message instanceof UpdateElectionTerm updateElectionTerm) {
+            context.getTermInformation().update(updateElectionTerm.getCurrentTerm(), updateElectionTerm.getVotedFor());
+        } else if (message instanceof SnapshotOffer snapshotOffer) {
+            onRecoveredSnapshot(snapshotOffer);
+        } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+            onRecoveredJournalLogEntry(replicatedLogEntry);
+        } else if (message instanceof ApplyJournalEntries applyJournalEntries) {
+            onRecoveredApplyLogEntries(applyJournalEntries.getToIndex());
+        } else if (message instanceof DeleteEntries deleteEntries) {
+            onDeleteEntries(deleteEntries);
+        } else if (message instanceof ServerConfigurationPayload serverConfigurationPayload) {
+            context.updatePeerIds(serverConfigurationPayload);
         } else if (message instanceof RecoveryCompleted) {
             recoveryComplete = true;
             onRecoveryCompletedMessage(persistentProvider);
@@ -254,7 +253,7 @@ class RaftActorRecoverySupport {
         final SnapshotManager snapshotManager = context.getSnapshotManager();
         if (snapshotManager.capture(logEntry, -1)) {
             log.info("Capturing snapshot, resetting timer for the next recovery snapshot interval.");
-            this.recoverySnapshotTimer.reset().start();
+            recoverySnapshotTimer.reset().start();
         } else {
             log.info("SnapshotManager is not able to capture snapshot at this time. It will be retried "
                 + "again with the next recovered entry.");
@@ -262,7 +261,7 @@ class RaftActorRecoverySupport {
     }
 
     private boolean shouldTakeRecoverySnapshot() {
-        return this.recoverySnapshotTimer != null && this.recoverySnapshotTimer.elapsed(TimeUnit.SECONDS)
+        return recoverySnapshotTimer != null && recoverySnapshotTimer.elapsed(TimeUnit.SECONDS)
             >= context.getConfigParams().getRecoverySnapshotIntervalSeconds();
     }
 
@@ -338,6 +337,6 @@ class RaftActorRecoverySupport {
     }
 
     private static boolean isMigratedSerializable(final Object message) {
-        return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated();
+        return message instanceof MigratedSerializable migrated && migrated.isMigrated();
     }
 }
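
shouldTakeRecoverySnapshot() above gates recovery snapshots on elapsed time against the configured interval, restarting the timer after each capture. A hedged sketch of that pattern with Guava's Stopwatch; the interval is taken as a plain constructor argument rather than through the config API:

    import com.google.common.base.Stopwatch;
    import java.util.concurrent.TimeUnit;

    final class RecoverySnapshotTimerSketch {
        private final Stopwatch timer = Stopwatch.createStarted();
        private final long intervalSeconds;

        RecoverySnapshotTimerSketch(final long intervalSeconds) {
            this.intervalSeconds = intervalSeconds;
        }

        boolean shouldTakeSnapshot() {
            // Same shape as shouldTakeRecoverySnapshot() above: elapsed time versus the interval.
            return timer.elapsed(TimeUnit.SECONDS) >= intervalSeconds;
        }

        void onSnapshotCaptured() {
            // Restart the interval once a snapshot has been captured, as the hunk above does.
            timer.reset().start();
        }
    }
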
index 29641cb00e441d5dedc5e7cccccd69bb0b0e6d3a..fd2cd419d77344fc9284d044d089d0ca4cd21489 100644 (file)
@@ -12,11 +12,10 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
+import com.google.common.collect.ImmutableList;
 import java.util.ArrayDeque;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.UUID;
@@ -28,6 +27,7 @@ import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
 import org.opendaylight.controller.cluster.raft.messages.AddServer;
 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
 import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
 import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
 import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
@@ -36,7 +36,6 @@ import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.util.AbstractUUIDIdentifier;
 import org.slf4j.Logger;
@@ -64,27 +63,27 @@ class RaftActorServerConfigurationSupport {
 
     RaftActorServerConfigurationSupport(final RaftActor raftActor) {
         this.raftActor = raftActor;
-        this.raftContext = raftActor.getRaftActorContext();
+        raftContext = raftActor.getRaftActorContext();
     }
 
     boolean handleMessage(final Object message, final ActorRef sender) {
-        if (message instanceof AddServer) {
-            onAddServer((AddServer) message, sender);
+        if (message instanceof AddServer addServer) {
+            onAddServer(addServer, sender);
             return true;
-        } else if (message instanceof RemoveServer) {
-            onRemoveServer((RemoveServer) message, sender);
+        } else if (message instanceof RemoveServer removeServer) {
+            onRemoveServer(removeServer, sender);
             return true;
-        } else if (message instanceof ChangeServersVotingStatus) {
-            onChangeServersVotingStatus((ChangeServersVotingStatus) message, sender);
+        } else if (message instanceof ChangeServersVotingStatus changeServersVotingStatus) {
+            onChangeServersVotingStatus(changeServersVotingStatus, sender);
             return true;
-        } else if (message instanceof ServerOperationTimeout) {
-            currentOperationState.onServerOperationTimeout((ServerOperationTimeout) message);
+        } else if (message instanceof ServerOperationTimeout serverOperationTimeout) {
+            currentOperationState.onServerOperationTimeout(serverOperationTimeout);
             return true;
-        } else if (message instanceof UnInitializedFollowerSnapshotReply) {
-            currentOperationState.onUnInitializedFollowerSnapshotReply((UnInitializedFollowerSnapshotReply) message);
+        } else if (message instanceof UnInitializedFollowerSnapshotReply uninitFollowerSnapshotReply) {
+            currentOperationState.onUnInitializedFollowerSnapshotReply(uninitFollowerSnapshotReply);
             return true;
-        } else if (message instanceof ApplyState) {
-            return onApplyState((ApplyState) message);
+        } else if (message instanceof ApplyState applyState) {
+            return onApplyState(applyState);
         } else if (message instanceof SnapshotComplete) {
             currentOperationState.onSnapshotComplete();
             return false;
@@ -748,7 +747,7 @@ class RaftActorServerConfigurationSupport {
         }
 
         private boolean updateLocalPeerInfo() {
-            List<ServerInfo> newServerInfoList = newServerInfoList();
+            final var newServerInfoList = newServerInfoList();
 
             // Check if new voting state would leave us with no voting members.
             boolean atLeastOneVoting = false;
@@ -765,28 +764,28 @@ class RaftActorServerConfigurationSupport {
             }
 
             raftContext.updatePeerIds(new ServerConfigurationPayload(newServerInfoList));
-            if (raftActor.getCurrentBehavior() instanceof AbstractLeader) {
-                AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
+            if (raftActor.getCurrentBehavior() instanceof AbstractLeader leader) {
                 leader.updateMinReplicaCount();
             }
 
             return true;
         }
 
-        private List<ServerInfo> newServerInfoList() {
-            Map<String, Boolean> serverVotingStatusMap = changeVotingStatusContext.getOperation()
-                    .getServerVotingStatusMap();
-            List<ServerInfo> newServerInfoList = new ArrayList<>();
-            for (String peerId: raftContext.getPeerIds()) {
-                newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId)
-                        ? serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting()));
+        private ImmutableList<ServerInfo> newServerInfoList() {
+            final var serverVotingStatusMap = changeVotingStatusContext.getOperation().getServerVotingStatusMap();
+            final var peerInfos = raftContext.getPeers();
+            final var newServerInfoList = ImmutableList.<ServerInfo>builderWithExpectedSize(peerInfos.size() + 1);
+            for (var peerInfo : peerInfos) {
+                final var peerId = peerInfo.getId();
+                final var voting = serverVotingStatusMap.get(peerId);
+                newServerInfoList.add(new ServerInfo(peerId, voting != null ? voting : peerInfo.isVoting()));
             }
 
-            newServerInfoList.add(new ServerInfo(raftContext.getId(), serverVotingStatusMap.containsKey(
-                    raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId())
-                            : raftContext.isVotingMember()));
+            final var myId = raftContext.getId();
+            final var myVoting = serverVotingStatusMap.get(myId);
+            newServerInfoList.add(new ServerInfo(myId, myVoting != null ? myVoting : raftContext.isVotingMember()));
 
-            return newServerInfoList;
+            return newServerInfoList.build();
         }
     }
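
newServerInfoList() now does a single map lookup per peer and treats a null result as "no override requested", instead of pairing containsKey() with get(). A small sketch of that lookup, assuming a plain Map<String, Boolean> of requested voting states:

    import java.util.Map;

    final class VotingOverrideSketch {
        // One map lookup instead of containsKey() + get(); null means the peer keeps its current state.
        static boolean effectiveVoting(final Map<String, Boolean> requestedVoting, final String peerId,
                final boolean currentVoting) {
            final Boolean requested = requestedVoting.get(peerId);
            return requested != null ? requested : currentVoting;
        }
    }
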
 
index bc96713dc46f31f7974ddb02a70ee50a3f1f62b7..e7344d9b4f17f74e347cf0fc339ff211243a46b5 100644 (file)
@@ -60,18 +60,18 @@ class RaftActorSnapshotMessageSupport {
     }
 
     boolean handleSnapshotMessage(final Object message, final ActorRef sender) {
-        if (message instanceof ApplySnapshot) {
-            onApplySnapshot((ApplySnapshot) message);
-        } else if (message instanceof SaveSnapshotSuccess) {
-            onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
-        } else if (message instanceof SaveSnapshotFailure) {
-            onSaveSnapshotFailure((SaveSnapshotFailure) message);
-        } else if (message instanceof CaptureSnapshotReply) {
-            onCaptureSnapshotReply((CaptureSnapshotReply) message);
+        if (message instanceof ApplySnapshot applySnapshot) {
+            onApplySnapshot(applySnapshot);
+        } else if (message instanceof SaveSnapshotSuccess saveSnapshotSuccess) {
+            onSaveSnapshotSuccess(saveSnapshotSuccess);
+        } else if (message instanceof SaveSnapshotFailure saveSnapshotFailure) {
+            onSaveSnapshotFailure(saveSnapshotFailure);
+        } else if (message instanceof CaptureSnapshotReply captureSnapshotReply) {
+            onCaptureSnapshotReply(captureSnapshotReply);
         } else if (COMMIT_SNAPSHOT.equals(message)) {
             context.getSnapshotManager().commit(-1, -1);
-        } else if (message instanceof GetSnapshot) {
-            onGetSnapshot(sender, (GetSnapshot) message);
+        } else if (message instanceof GetSnapshot getSnapshot) {
+            onGetSnapshot(sender, getSnapshot);
         } else if (message instanceof SnapshotComplete) {
             log.debug("{}: SnapshotComplete received", context.getId());
         } else {
index 7876ea7cf5ce6e7fdcac141598c637949ed2811b..a09a4aa2cbb94f19760dc6fe87744cda2f2a64f0 100644 (file)
@@ -13,13 +13,14 @@ package org.opendaylight.controller.cluster.raft;
  * @author Thomas Pantelis
  */
 public final class RaftVersions {
-    public static final short HELIUM_VERSION = 0;
-    public static final short LITHIUM_VERSION = 1;
-    public static final short BORON_VERSION = 3;
+    // HELIUM_VERSION = 0
+    // LITHIUM_VERSION = 1
+    // BORON_VERSION = 3
     public static final short FLUORINE_VERSION = 4;
-    public static final short CURRENT_VERSION = FLUORINE_VERSION;
+    public static final short ARGON_VERSION = 5;
+    public static final short CURRENT_VERSION = ARGON_VERSION;
 
     private RaftVersions() {
-
+        // Hidden on purpose
     }
 }
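
With CURRENT_VERSION bumped to ARGON_VERSION and the pre-Fluorine constants retired to comments, replies from peers older than Fluorine are now ignored (see the AbstractLeader hunk further down). A hedged sketch of that version gate, with the constants copied from the hunk above:

    final class VersionGateSketch {
        static final short FLUORINE_VERSION = 4;
        static final short ARGON_VERSION = 5;
        static final short CURRENT_VERSION = ARGON_VERSION;

        // Returns true when a reply from a peer at the given raft version should be processed.
        static boolean isSupportedPeer(final short peerRaftVersion) {
            return peerRaftVersion >= FLUORINE_VERSION;
        }
    }
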
index 1348ffca9163adf786d4f715fce3ddf858605548..360f6b690376c1c413b1a2f04bc8a7e946400758 100644 (file)
@@ -8,7 +8,7 @@
 
 package org.opendaylight.controller.cluster.raft;
 
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * Represents one entry in the replicated log.
@@ -42,6 +42,15 @@ public interface ReplicatedLogEntry {
      */
     int size();
 
+    /**
+     * Return the estimate of serialized size of this entry when passed through serialization. The estimate needs to
+     * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of
+     * uncertainty.
+     *
+     * @return An estimate of serialized size.
+     */
+    int serializedSize();
+
     /**
      * Checks if persistence is pending for this entry.
      *
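
The new serializedSize() contract asks implementations for a cautious over-estimate of the wire size. One hedged way to satisfy it is to pad the payload's own estimate with a fixed per-entry allowance; the 64-byte constant below is purely illustrative, not taken from the implementation:

    final class EntrySizeSketch {
        // Illustrative allowance for index, term and framing on top of the payload estimate.
        private static final int ENTRY_OVERHEAD_BYTES = 64;

        static int serializedSize(final int payloadSerializedSize) {
            // Err on the high side: a slight over-estimate is acceptable, an under-estimate is not.
            return payloadSerializedSize + ENTRY_OVERHEAD_BYTES;
        }
    }
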
index 8037fb8d73ce88e6309f6a0f56b6d4fb787e8761..57e6140fc9efc06bd0b343adce8ee852b896cbd4 100644 (file)
@@ -68,7 +68,7 @@ public class SnapshotManager implements SnapshotState {
      */
     public SnapshotManager(final RaftActorContext context, final Logger logger) {
         this.context = context;
-        this.log = logger;
+        log = logger;
     }
 
     public boolean isApplying() {
@@ -195,7 +195,7 @@ public class SnapshotManager implements SnapshotState {
                 newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, mandatoryTrim);
     }
 
-    private class AbstractSnapshotState implements SnapshotState {
+    private abstract class AbstractSnapshotState implements SnapshotState {
 
         @Override
         public boolean isCapturing() {
@@ -282,8 +282,7 @@ public class SnapshotManager implements SnapshotState {
         }
     }
 
-    private class Idle extends AbstractSnapshotState {
-
+    private final class Idle extends AbstractSnapshotState {
         @Override
         public boolean isCapturing() {
             return false;
@@ -307,12 +306,12 @@ public class SnapshotManager implements SnapshotState {
 
             log.debug("{}: lastSequenceNumber prior to capture: {}", persistenceId(), lastSequenceNumber);
 
-            SnapshotManager.this.currentState = CREATING;
+            currentState = CREATING;
 
             try {
                 createSnapshotProcedure.accept(Optional.ofNullable(installSnapshotStream));
             } catch (Exception e) {
-                SnapshotManager.this.currentState = IDLE;
+                currentState = IDLE;
                 log.error("Error creating snapshot", e);
                 return false;
             }
@@ -338,7 +337,7 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public void apply(final ApplySnapshot toApply) {
-            SnapshotManager.this.applySnapshot = toApply;
+            applySnapshot = toApply;
 
             lastSequenceNumber = context.getPersistenceProvider().getLastSequenceNumber();
 
@@ -346,7 +345,7 @@ public class SnapshotManager implements SnapshotState {
 
             context.getPersistenceProvider().saveSnapshot(toApply.getSnapshot());
 
-            SnapshotManager.this.currentState = PERSISTING;
+            currentState = PERSISTING;
         }
 
         @Override
@@ -360,8 +359,7 @@ public class SnapshotManager implements SnapshotState {
         }
     }
 
-    private class Creating extends AbstractSnapshotState {
-
+    private final class Creating extends AbstractSnapshotState {
         @Override
         public void persist(final Snapshot.State snapshotState, final Optional<OutputStream> installSnapshotStream,
                 final long totalMemory) {
@@ -440,7 +438,8 @@ public class SnapshotManager implements SnapshotState {
             if (installSnapshotStream.isPresent()) {
                 if (context.getId().equals(currentBehavior.getLeaderId())) {
                     try {
-                        ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.get()).asByteSource();
+                        ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.orElseThrow())
+                            .asByteSource();
                         currentBehavior.handleMessage(context.getActor(),
                                 new SendInstallSnapshot(snapshot, snapshotBytes));
                     } catch (IOException e) {
@@ -448,12 +447,12 @@ public class SnapshotManager implements SnapshotState {
                                 context.getId(), e);
                     }
                 } else {
-                    ((FileBackedOutputStream)installSnapshotStream.get()).cleanup();
+                    ((FileBackedOutputStream)installSnapshotStream.orElseThrow()).cleanup();
                 }
             }
 
             captureSnapshot = null;
-            SnapshotManager.this.currentState = PERSISTING;
+            currentState = PERSISTING;
         }
 
         @Override
@@ -463,8 +462,7 @@ public class SnapshotManager implements SnapshotState {
 
     }
 
-    private class Persisting extends AbstractSnapshotState {
-
+    private final class Persisting extends AbstractSnapshotState {
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public void commit(final long sequenceNumber, final long timeStamp) {
@@ -525,7 +523,7 @@ public class SnapshotManager implements SnapshotState {
         private void snapshotComplete() {
             lastSequenceNumber = -1;
             applySnapshot = null;
-            SnapshotManager.this.currentState = IDLE;
+            currentState = IDLE;
 
             context.getActor().tell(SnapshotComplete.INSTANCE, context.getActor());
         }
@@ -543,15 +541,15 @@ public class SnapshotManager implements SnapshotState {
         long getTerm();
     }
 
-    static class LastAppliedTermInformationReader implements TermInformationReader {
+    static final class LastAppliedTermInformationReader implements TermInformationReader {
         private long index;
         private long term;
 
         LastAppliedTermInformationReader init(final ReplicatedLog log, final long originalIndex,
                 final ReplicatedLogEntry lastLogEntry, final boolean hasFollowers) {
             ReplicatedLogEntry entry = log.get(originalIndex);
-            this.index = -1L;
-            this.term = -1L;
+            index = -1L;
+            term = -1L;
             if (!hasFollowers) {
                 if (lastLogEntry != null) {
                     // since we have persisted the last-log-entry to persistent journal before the capture,
@@ -571,23 +569,23 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public long getIndex() {
-            return this.index;
+            return index;
         }
 
         @Override
         public long getTerm() {
-            return this.term;
+            return term;
         }
     }
 
-    private static class ReplicatedToAllTermInformationReader implements TermInformationReader {
+    private static final class ReplicatedToAllTermInformationReader implements TermInformationReader {
         private long index;
         private long term;
 
         ReplicatedToAllTermInformationReader init(final ReplicatedLog log, final long originalIndex) {
             ReplicatedLogEntry entry = log.get(originalIndex);
-            this.index = -1L;
-            this.term = -1L;
+            index = -1L;
+            term = -1L;
 
             if (entry != null) {
                 index = entry.getIndex();
@@ -599,12 +597,12 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public long getIndex() {
-            return this.index;
+            return index;
         }
 
         @Override
         public long getTerm() {
-            return this.term;
+            return term;
         }
     }
 }
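
SnapshotManager keeps a small state machine (Idle, Creating, Persisting); the hunk makes the base state abstract, the concrete states final, and drops the SnapshotManager.this qualifier on currentState writes. A much-reduced, hedged sketch of that transition style with only two states:

    final class SnapshotStateSketch {
        private abstract class State {
            void capture() {
                // invalid transition in this state
            }

            void persisted() {
                // invalid transition in this state
            }
        }

        private final class Idle extends State {
            @Override
            void capture() {
                currentState = persisting;   // unqualified write, as in the hunk above
            }
        }

        private final class Persisting extends State {
            @Override
            void persisted() {
                currentState = idle;
            }
        }

        private final Idle idle = new Idle();
        private final Persisting persisting = new Persisting();
        private State currentState = idle;
    }
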
index f16e5e2b59e1d99d1eb9ab78501c8f00fdff60cb..93b5f04df33d8ee92ccef6c6b4a007ad56189859 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.raft;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.Cancellable;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -27,10 +28,12 @@ abstract class TimedRunnable implements Runnable {
     private final Cancellable cancelTimer;
     private boolean canRun = true;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "https://github.com/spotbugs/spotbugs/issues/1867")
     TimedRunnable(final FiniteDuration timeout, final RaftActor actor) {
         cancelTimer = requireNonNull(actor).getContext().system().scheduler()
-                .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel,
-                    actor.getContext().system().dispatcher(), actor.self());
+            .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel,
+                actor.getContext().system().dispatcher(), actor.self());
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java
deleted file mode 100644 (file)
index 67f3ed9..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Abstract base that implements Externalizable with no-op methods that is intended for classes that use the
- * externalizable proxy pattern but have no data to serialize and read-resolve to a static instance.
- *
- * @author Thomas Pantelis
- */
-public abstract class EmptyExternalizableProxy implements Externalizable {
-    private static final long serialVersionUID = 1L;
-
-    private final Object readResolveTo;
-
-    protected EmptyExternalizableProxy(final Object readResolveTo) {
-        this.readResolveTo = requireNonNull(readResolveTo);
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) {
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) {
-    }
-
-    protected Object readResolve() {
-        return readResolveTo;
-    }
-}
index c58d86354a917d9fc968de3eff2b97ba6107d356..edd4986a47b4b36b036f2f0ea209ac32aab69b0c 100644 (file)
@@ -5,40 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.base.messages;
 
 import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.yangtools.concepts.Identifier;
 
-public class Replicate {
-    private final ActorRef clientActor;
-    private final Identifier identifier;
-    private final ReplicatedLogEntry replicatedLogEntry;
-    private final boolean sendImmediate;
-
-    public Replicate(ActorRef clientActor, Identifier identifier, ReplicatedLogEntry replicatedLogEntry,
-            boolean sendImmediate) {
-        this.clientActor = clientActor;
-        this.identifier = identifier;
-        this.replicatedLogEntry = replicatedLogEntry;
-        this.sendImmediate = sendImmediate;
-    }
-
-    public ActorRef getClientActor() {
-        return clientActor;
-    }
-
-    public Identifier getIdentifier() {
-        return identifier;
-    }
-
-    public ReplicatedLogEntry getReplicatedLogEntry() {
-        return replicatedLogEntry;
-    }
-
-    public boolean isSendImmediate() {
-        return sendImmediate;
-    }
+public record Replicate(long logIndex, boolean sendImmediate, ActorRef clientActor, Identifier identifier) {
+    // Nothing else here
 }
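
Replicate is now a record that carries just the log index instead of the whole ReplicatedLogEntry, which also removes the getter boilerplate. A usage sketch against the record as declared above; the actor and identifier values are whatever the caller already has:

    import akka.actor.ActorRef;
    import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
    import org.opendaylight.yangtools.concepts.Identifier;

    final class ReplicateUsageSketch {
        static Replicate immediateReplicate(final long logIndex, final ActorRef clientActor,
                final Identifier identifier) {
            // Component order follows the record declaration: (logIndex, sendImmediate, clientActor, identifier).
            return new Replicate(logIndex, true, clientActor, identifier);
        }
    }
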
index b212250dd4984828d3c57f8a70f0a177da5cff5c..2b7684481955110bab875403db0447554583cc7e 100644 (file)
@@ -16,25 +16,18 @@ import java.io.Serializable;
  * @author Thomas Pantelis
  */
 public final class TimeoutNow implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     public static final TimeoutNow INSTANCE = new TimeoutNow();
 
     private TimeoutNow() {
         // Hidden on purpose
     }
 
-    private Object writeReplace() {
-        return new Proxy();
-    }
-
-    private static class Proxy extends EmptyExternalizableProxy {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            super(INSTANCE);
-        }
+    @java.io.Serial
+    @SuppressWarnings("static-method")
+    private Object readResolve() {
+        return INSTANCE;
     }
 }
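
TimeoutNow (together with the deleted EmptyExternalizableProxy base above) switches from an externalizable proxy to a plain readResolve() that canonicalizes back to the singleton on deserialization. A generic sketch of the same idiom, not tied to the raft message types:

    import java.io.Serializable;

    final class SingletonMessageSketch implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        public static final SingletonMessageSketch INSTANCE = new SingletonMessageSketch();

        private SingletonMessageSketch() {
            // Hidden on purpose
        }

        @java.io.Serial
        private Object readResolve() {
            // Whatever the stream carried, hand back the canonical instance.
            return INSTANCE;
        }
    }
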
index 6560ad76c3937285300173f16df53f4c60d9ae1d..7514dccff40c53aab2621cb56314859baee80d89 100644 (file)
@@ -20,7 +20,6 @@ import java.io.ObjectOutputStream;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -34,11 +33,11 @@ import org.opendaylight.controller.cluster.io.SharedFileBackedOutputStream;
 import org.opendaylight.controller.cluster.messaging.MessageSlicer;
 import org.opendaylight.controller.cluster.messaging.SliceOptions;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
-import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
 import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
 import org.opendaylight.controller.cluster.raft.PeerInfo;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.VotingState;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -48,6 +47,7 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
@@ -56,8 +56,6 @@ import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -110,7 +108,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         super(context, state);
 
         appendEntriesMessageSlicer = MessageSlicer.builder().logContext(logName())
-            .messageSliceSize(context.getConfigParams().getSnapshotChunkSize())
+            .messageSliceSize(context.getConfigParams().getMaximumMessageSliceSize())
             .expireStateAfterInactivity(context.getConfigParams().getElectionTimeOutInterval().toMillis() * 3,
                     TimeUnit.MILLISECONDS).build();
 
@@ -166,7 +164,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         followerToLog.remove(followerId);
     }
 
-    public void updateMinReplicaCount() {
+    public final void updateMinReplicaCount() {
         int numVoting = 0;
         for (PeerInfo peer: context.getPeers()) {
             if (peer.isVoting()) {
@@ -221,6 +219,13 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return this;
         }
 
+        final var followerRaftVersion = appendEntriesReply.getRaftVersion();
+        if (followerRaftVersion < RaftVersions.FLUORINE_VERSION) {
+            log.warn("{}: handleAppendEntriesReply - ignoring reply from follower {} raft version {}", logName(),
+                followerId, followerRaftVersion);
+            return this;
+        }
+
         final long lastActivityNanos = followerLogInformation.nanosSinceLastActivity();
         if (lastActivityNanos > context.getConfigParams().getElectionTimeOutInterval().toNanos()) {
             log.warn("{} : handleAppendEntriesReply delayed beyond election timeout, "
@@ -231,7 +236,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
         followerLogInformation.markFollowerActive();
         followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
-        followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());
+        followerLogInformation.setRaftVersion(followerRaftVersion);
         followerLogInformation.setNeedsLeaderAddress(appendEntriesReply.isNeedsLeaderAddress());
 
         long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
@@ -442,15 +447,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
      * @return the ClientRequestTracker or null if none available
      */
     private ClientRequestTracker removeClientRequestTracker(final long logIndex) {
-        final Iterator<ClientRequestTracker> it = trackers.iterator();
+        final var it = trackers.iterator();
         while (it.hasNext()) {
-            final ClientRequestTracker t = it.next();
-            if (t.getIndex() == logIndex) {
+            final var tracker = it.next();
+            if (tracker.logIndex() == logIndex) {
                 it.remove();
-                return t;
+                return tracker;
             }
         }
-
         return null;
     }
 
@@ -460,17 +464,16 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         // If it does that means the leader wasn't dropped before the transaction applied.
         // That means that this transaction can be safely applied as a local transaction since we
         // have the ClientRequestTracker.
-        final ClientRequestTracker tracker = removeClientRequestTracker(entry.getIndex());
+        final var tracker = removeClientRequestTracker(entry.getIndex());
         if (tracker != null) {
-            return new ApplyState(tracker.getClientActor(), tracker.getIdentifier(), entry);
+            return new ApplyState(tracker.clientActor(), tracker.identifier(), entry);
         }
 
         // Tracker is missing, this means that we switched behaviours between replicate and applystate
         // and became the leader again. We still want to apply this as a local modification because
        // and became the leader again. We still want to apply this as a local modification because
         // we have resumed leadership with that log entry having been committed.
-        final Payload payload = entry.getData();
-        if (payload instanceof IdentifiablePayload) {
-            return new ApplyState(null, ((IdentifiablePayload<?>) payload).getIdentifier(), entry);
+        if (entry.getData() instanceof IdentifiablePayload<?> identifiable) {
+            return new ApplyState(null, identifiable.getIdentifier(), entry);
         }
 
         return new ApplyState(null, null, entry);
@@ -493,47 +496,45 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (message instanceof RaftRPC) {
-            RaftRPC rpc = (RaftRPC) message;
-            // If RPC request or response contains term T > currentTerm:
-            // set currentTerm = T, convert to follower (§5.1)
-            // This applies to all RPC messages and responses
-            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
-                log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
-                        logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
-
-                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
-
-                // This is a special case. Normally when stepping down as leader we don't process and reply to the
-                // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
-                // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
-                // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
-                // state and starting a new election and grabbing leadership back before the other candidate node can
-                // start a new election due to lack of responses. This case would only occur if there isn't a majority
-                // of other nodes available that can elect the requesting candidate. Since we're transferring
-                // leadership, we should make every effort to get the requesting node elected.
-                if (rpc instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) {
-                    log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
-                    super.handleMessage(sender, rpc);
-                }
-
-                return internalSwitchBehavior(RaftState.Follower);
+        // If RPC request or response contains term T > currentTerm:
+        // set currentTerm = T, convert to follower (§5.1)
+        // This applies to all RPC messages and responses
+        if (message instanceof RaftRPC rpc && rpc.getTerm() > context.getTermInformation().getCurrentTerm()
+                && shouldUpdateTerm(rpc)) {
+
+            log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
+            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+
+            // This is a special case. Normally when stepping down as leader we don't process and reply to the
+            // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
+            // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
+            // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
+            // state and starting a new election and grabbing leadership back before the other candidate node can
+            // start a new election due to lack of responses. This case would only occur if there isn't a majority
+            // of other nodes available that can elect the requesting candidate. Since we're transferring
+            // leadership, we should make every effort to get the requesting node elected.
+            if (rpc instanceof RequestVote requestVote && context.getRaftActorLeadershipTransferCohort() != null) {
+                log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
+                requestVote(sender, requestVote);
             }
+
+            return internalSwitchBehavior(RaftState.Follower);
         }
 
         if (message instanceof SendHeartBeat) {
             beforeSendHeartbeat();
             sendHeartBeat();
             scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
-        } else if (message instanceof SendInstallSnapshot) {
-            SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message;
+        } else if (message instanceof SendInstallSnapshot sendInstallSnapshot) {
             setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(),
                 sendInstallSnapshot.getSnapshotBytes()));
             sendInstallSnapshot();
-        } else if (message instanceof Replicate) {
-            replicate((Replicate) message);
-        } else if (message instanceof InstallSnapshotReply) {
-            handleInstallSnapshotReply((InstallSnapshotReply) message);
+        } else if (message instanceof Replicate replicate) {
+            replicate(replicate);
+        } else if (message instanceof InstallSnapshotReply installSnapshotReply) {
+            handleInstallSnapshotReply(installSnapshotReply);
         } else if (message instanceof CheckConsensusReached) {
             possiblyUpdateCommitIndex();
         } else {
@@ -573,7 +574,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                 if (installSnapshotState.isLastChunk(reply.getChunkIndex())) {
                     //this was the last chunk reply
 
-                    long followerMatchIndex = snapshotHolder.get().getLastIncludedIndex();
+                    long followerMatchIndex = snapshotHolder.orElseThrow().getLastIncludedIndex();
                     followerLogInformation.setMatchIndex(followerMatchIndex);
                     followerLogInformation.setNextIndex(followerMatchIndex + 1);
                     followerLogInformation.clearLeaderInstallSnapshotState();
@@ -641,17 +642,16 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     }
 
     private void replicate(final Replicate replicate) {
-        long logIndex = replicate.getReplicatedLogEntry().getIndex();
+        final long logIndex = replicate.logIndex();
 
-        log.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}, isSendImmediate: {}", logName(),
-                replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass(),
-                replicate.isSendImmediate());
+        log.debug("{}: Replicate message: identifier: {}, logIndex: {}, isSendImmediate: {}", logName(),
+                replicate.identifier(), logIndex, replicate.sendImmediate());
 
         // Create a tracker entry we will use this later to notify the
         // client actor
-        if (replicate.getClientActor() != null) {
-            trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(),
-                    logIndex));
+        final var clientActor = replicate.clientActor();
+        if (clientActor != null) {
+            trackers.add(new ClientRequestTracker(logIndex, clientActor, replicate.identifier()));
         }
 
         boolean applyModificationToState = !context.anyVotingPeers()
@@ -662,7 +662,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             applyLogToStateMachine(logIndex);
         }
 
-        if (replicate.isSendImmediate() && !followerToLog.isEmpty()) {
+        if (replicate.sendImmediate() && !followerToLog.isEmpty()) {
             sendAppendEntries(0, false);
         }
     }
@@ -776,14 +776,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         // Try to get all the entries in the journal but not exceeding the max data size for a single AppendEntries
         // message.
         int maxEntries = (int) context.getReplicatedLog().size();
-        final int maxDataSize = context.getConfigParams().getSnapshotChunkSize();
+        final int maxDataSize = context.getConfigParams().getMaximumMessageSliceSize();
         final long followerNextIndex = followerLogInfo.getNextIndex();
         List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex,
                 maxEntries, maxDataSize);
 
         // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
         // that is the case, then we need to slice it into smaller chunks.
-        if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
+        if (entries.size() != 1 || entries.get(0).getData().serializedSize() <= maxDataSize) {
             // Don't need to slice.
             return entries;
         }
@@ -904,10 +904,10 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         }
 
         boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
-            this.getReplicatedToAllIndex(), followerId);
+            getReplicatedToAllIndex(), followerId);
         if (captureInitiated) {
             followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState(
-                context.getConfigParams().getSnapshotChunkSize(), logName()));
+                context.getConfigParams().getMaximumMessageSliceSize(), logName()));
         }
 
         return captureInitiated;
@@ -949,14 +949,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         if (snapshotHolder.isPresent()) {
             LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
             if (installSnapshotState == null) {
-                installSnapshotState = new LeaderInstallSnapshotState(context.getConfigParams().getSnapshotChunkSize(),
-                        logName());
+                installSnapshotState = new LeaderInstallSnapshotState(
+                        context.getConfigParams().getMaximumMessageSliceSize(), logName());
                 followerLogInfo.setLeaderInstallSnapshotState(installSnapshotState);
             }
 
             try {
                 // Ensure the snapshot bytes are set - this is a no-op.
-                installSnapshotState.setSnapshotBytes(snapshotHolder.get().getSnapshotBytes());
+                installSnapshotState.setSnapshotBytes(snapshotHolder.orElseThrow().getSnapshotBytes());
 
                 if (!installSnapshotState.canSendNextChunk()) {
                     return;
@@ -981,7 +981,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             } catch (IOException e) {
                 log.warn("{}: Unable to send chunk: {}/{}. Reseting snapshot progress. Snapshot state: {}", logName(),
                         installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks(),
-                        installSnapshotState);
+                        installSnapshotState, e);
                 installSnapshotState.reset();
             }
         }
@@ -995,14 +995,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         installSnapshotState.startChunkTimer();
         followerActor.tell(
                 new InstallSnapshot(currentTerm(), context.getId(),
-                        snapshotHolder.get().getLastIncludedIndex(),
-                        snapshotHolder.get().getLastIncludedTerm(),
+                        snapshotHolder.orElseThrow().getLastIncludedIndex(),
+                        snapshotHolder.orElseThrow().getLastIncludedTerm(),
                         snapshotChunk,
                         chunkIndex,
                         installSnapshotState.getTotalChunks(),
                         OptionalInt.of(installSnapshotState.getLastChunkHashCode()),
-                        serverConfig
-                ).toSerializable(followerLogInfo.getRaftVersion()),
+                        serverConfig,
+                        followerLogInfo.getRaftVersion()),
                 actor()
         );
     }
@@ -1124,8 +1124,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         private final ByteSource snapshotBytes;
 
         SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) {
-            this.lastIncludedTerm = snapshot.getLastAppliedTerm();
-            this.lastIncludedIndex = snapshot.getLastAppliedIndex();
+            lastIncludedTerm = snapshot.getLastAppliedTerm();
+            lastIncludedIndex = snapshot.getLastAppliedIndex();
             this.snapshotBytes = snapshotBytes;
         }
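
getEntriesToSend() now compares a lone entry's serializedSize() estimate against getMaximumMessageSliceSize() to decide whether it must be sliced. A hedged sketch of that decision; SizedEntry is a stand-in for the entry's payload, not the real ReplicatedLogEntry interface:

    import java.util.List;

    final class SliceDecisionSketch {
        interface SizedEntry {
            int serializedSize();
        }

        // Only a single entry whose estimated serialized size exceeds the maximum message slice size
        // needs to be sliced into chunks; otherwise the batch is sent as-is.
        static boolean needsSlicing(final List<? extends SizedEntry> entries, final int maxDataSize) {
            return entries.size() == 1 && entries.get(0).serializedSize() > maxDataSize;
        }
    }
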
 
index fd2fbd332c7a58bab6f60b01e37b2193ad98c3e7..055a0535001f56f996cedc8bff349ae403aeb9b3 100644 (file)
@@ -15,8 +15,8 @@ import akka.cluster.Cluster;
 import akka.cluster.Member;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Optional;
-import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
@@ -70,26 +70,19 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     AbstractRaftActorBehavior(final RaftActorContext context, final RaftState state) {
         this.context = requireNonNull(context);
         this.state = requireNonNull(state);
-        this.log = context.getLogger();
+        log = context.getLogger();
 
         logName = String.format("%s (%s)", context.getId(), state);
     }
 
     public static RaftActorBehavior createBehavior(final RaftActorContext context, final RaftState state) {
-        switch (state) {
-            case Candidate:
-                return new Candidate(context);
-            case Follower:
-                return new Follower(context);
-            case IsolatedLeader:
-                return new IsolatedLeader(context);
-            case Leader:
-                return new Leader(context);
-            case PreLeader:
-                return new PreLeader(context);
-            default:
-                throw new IllegalArgumentException("Unhandled state " + state);
-        }
+        return switch (state) {
+            case Candidate -> new Candidate(context);
+            case Follower -> new Follower(context);
+            case IsolatedLeader -> new IsolatedLeader(context);
+            case Leader -> new Leader(context);
+            case PreLeader -> new PreLeader(context);
+        };
     }
 
     @Override
@@ -212,10 +205,8 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
             // the log with the later term is more up-to-date. If the logs
             // end with the same term, then whichever log is longer is
             // more up-to-date.
-            if (requestVote.getLastLogTerm() > lastTerm()) {
-                candidateLatest = true;
-            } else if (requestVote.getLastLogTerm() == lastTerm()
-                    && requestVote.getLastLogIndex() >= lastIndex()) {
+            if (requestVote.getLastLogTerm() > lastTerm()
+                || requestVote.getLastLogTerm() == lastTerm() && requestVote.getLastLogIndex() >= lastIndex()) {
                 candidateLatest = true;
             }
 
@@ -247,7 +238,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      * @return a random election duration
      */
     protected FiniteDuration electionDuration() {
-        long variance = new Random().nextInt(context.getConfigParams().getElectionTimeVariance());
+        long variance = ThreadLocalRandom.current().nextInt(context.getConfigParams().getElectionTimeVariance());
         return context.getConfigParams().getElectionTimeOutInterval().$plus(
                 new FiniteDuration(variance, TimeUnit.MILLISECONDS));
     }
@@ -270,6 +261,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @param interval the duration after which we should trigger a new election
      */
+    // Non-final for testing
     protected void scheduleElection(final FiniteDuration interval) {
         stopElection();
 
@@ -301,7 +293,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @return the actor
      */
-    protected ActorRef actor() {
+    protected final ActorRef actor() {
         return context.getActor();
     }
 
@@ -419,14 +411,14 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
 
     @Override
     public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
-        if (message instanceof AppendEntries) {
-            return appendEntries(sender, (AppendEntries) message);
-        } else if (message instanceof AppendEntriesReply) {
-            return handleAppendEntriesReply(sender, (AppendEntriesReply) message);
-        } else if (message instanceof RequestVote) {
-            return requestVote(sender, (RequestVote) message);
-        } else if (message instanceof RequestVoteReply) {
-            return handleRequestVoteReply(sender, (RequestVoteReply) message);
+        if (message instanceof AppendEntries appendEntries) {
+            return appendEntries(sender, appendEntries);
+        } else if (message instanceof AppendEntriesReply appendEntriesReply) {
+            return handleAppendEntriesReply(sender, appendEntriesReply);
+        } else if (message instanceof RequestVote requestVote) {
+            return requestVote(sender, requestVote);
+        } else if (message instanceof RequestVoteReply requestVoteReply) {
+            return handleRequestVoteReply(sender, requestVoteReply);
         } else {
             return null;
         }
@@ -447,12 +439,12 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
             return this;
         }
 
-        log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), this.state(),
+        log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), state(),
                 newBehavior.state(), context.getTermInformation().getCurrentTerm());
         try {
             close();
         } catch (RuntimeException e) {
-            log.error("{}: Failed to close behavior : {}", logName(), this.state(), e);
+            log.error("{}: Failed to close behavior : {}", logName(), state(), e);
         }
         return newBehavior;
     }
@@ -496,25 +488,24 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         }
     }
 
-    protected String getId() {
+    protected final String getId() {
         return context.getId();
     }
 
     // Check whether we should update the term. In case of half-connected nodes, we want to ignore RequestVote
     // messages, as the candidate is not able to receive our response.
     protected boolean shouldUpdateTerm(final RaftRPC rpc) {
-        if (!(rpc instanceof RequestVote)) {
+        if (!(rpc instanceof RequestVote requestVote)) {
             return true;
         }
 
-        final RequestVote requestVote = (RequestVote) rpc;
         log.debug("{}: Found higher term in RequestVote rpc, verifying whether it's safe to update term.", logName());
         final Optional<Cluster> maybeCluster = context.getCluster();
         if (!maybeCluster.isPresent()) {
             return true;
         }
 
-        final Cluster cluster = maybeCluster.get();
+        final Cluster cluster = maybeCluster.orElseThrow();
 
         final Set<Member> unreachable = cluster.state().getUnreachable();
         log.debug("{}: Cluster state: {}", logName(), unreachable);
index a8762ec76edb11e755731379960a7014f215a598..77f7a06c49dffe75557e6544e2deba03f16f5614 100644 (file)
@@ -5,13 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import java.util.ArrayList;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
 import org.opendaylight.controller.cluster.raft.PeerInfo;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
@@ -44,22 +42,19 @@ import scala.concurrent.duration.FiniteDuration;
  * <li> If election timeout elapses: start new election
  * </ul>
  */
-public class Candidate extends AbstractRaftActorBehavior {
-
-    private int voteCount;
-
+public final class Candidate extends AbstractRaftActorBehavior {
+    private final ImmutableList<String> votingPeers;
     private final int votesRequired;
 
-    private final Collection<String> votingPeers = new ArrayList<>();
+    private int voteCount;
 
     public Candidate(final RaftActorContext context) {
         super(context, RaftState.Candidate);
 
-        for (PeerInfo peer: context.getPeers()) {
-            if (peer.isVoting()) {
-                votingPeers.add(peer.getId());
-            }
-        }
+        votingPeers = context.getPeers().stream()
+            .filter(PeerInfo::isVoting)
+            .map(PeerInfo::getId)
+            .collect(ImmutableList.toImmutableList());
 
         log.debug("{}: Election: Candidate has following voting peers: {}", logName(), votingPeers);
 
@@ -75,12 +70,12 @@ public class Candidate extends AbstractRaftActorBehavior {
     }
 
     @Override
-    public final String getLeaderId() {
+    public String getLeaderId() {
         return null;
     }
 
     @Override
-    public final short getLeaderPayloadVersion() {
+    public short getLeaderPayloadVersion() {
         return -1;
     }
 
@@ -134,7 +129,7 @@ public class Candidate extends AbstractRaftActorBehavior {
 
 
     @Override
-    final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+    ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
         throw new IllegalStateException("A candidate should never attempt to apply " + entry);
     }
 
@@ -158,9 +153,7 @@ public class Candidate extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (message instanceof RaftRPC) {
-
-            RaftRPC rpc = (RaftRPC) message;
+        if (message instanceof RaftRPC rpc) {
 
             log.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc,
                         context.getTermInformation().getCurrentTerm());
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java
new file mode 100644 (file)
index 0000000..79c605a
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FollowerIdentifier}.
+ */
+final class FI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private String value;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FI() {
+        // For Externalizable
+    }
+
+    FI(final String value) {
+        this.value = requireNonNull(value);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(value);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        value = (String) in.readObject();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new FollowerIdentifier(value);
+    }
+}
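
FI is the Externalizable-proxy shape used throughout this change: writeReplace() on the identifier emits the proxy, and the proxy's readResolve() rebuilds the identifier, so only the compact form is written to the stream. A rough sketch of the round trip through plain Java serialization; roundTrip() and the String payload are illustrative, not part of the commit:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    final class ProxyRoundTripSketch {
        static Object roundTrip(final Object original) throws Exception {
            final var bytes = new ByteArrayOutputStream();
            try (var oos = new ObjectOutputStream(bytes)) {
                oos.writeObject(original);     // writeReplace() swaps in the proxy, if any
            }
            try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                return ois.readObject();       // readResolve() swaps the proxy back out
            }
        }

        public static void main(final String[] args) throws Exception {
            System.out.println(roundTrip("follower-1"));
        }
    }
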
index 288ce32a64ec21286adda9cf3672f7b53d1bf506..9dd630aade8a5e2e0a8265aa69564ceaace63cd4 100644 (file)
@@ -16,8 +16,8 @@ import akka.cluster.Member;
 import akka.cluster.MemberStatus;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Stopwatch;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
@@ -53,6 +53,7 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  * convert to candidate
  * </ul>
  */
+// Non-final for testing
 public class Follower extends AbstractRaftActorBehavior {
     private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;
 
@@ -69,11 +70,13 @@ public class Follower extends AbstractRaftActorBehavior {
         this(context, null, (short)-1);
     }
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "electionDuration() is not final for Candidate override")
     public Follower(final RaftActorContext context, final String initialLeaderId,
             final short initialLeaderPayloadVersion) {
         super(context, RaftState.Follower);
-        this.leaderId = initialLeaderId;
-        this.leaderPayloadVersion = initialLeaderPayloadVersion;
+        leaderId = initialLeaderId;
+        leaderPayloadVersion = initialLeaderPayloadVersion;
 
         initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
             .getSyncIndexThreshold());
@@ -162,12 +165,11 @@ public class Follower extends AbstractRaftActorBehavior {
         leaderId = appendEntries.getLeaderId();
         leaderPayloadVersion = appendEntries.getPayloadVersion();
 
-        if (appendEntries.getLeaderAddress().isPresent()) {
-            final String address = appendEntries.getLeaderAddress().get();
-            log.debug("New leader address: {}", address);
-
-            context.setPeerAddress(leaderId, address);
-            context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, address);
+        final var leaderAddress = appendEntries.leaderAddress();
+        if (leaderAddress != null) {
+            log.debug("New leader address: {}", leaderAddress);
+            context.setPeerAddress(leaderId, leaderAddress);
+            context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, leaderAddress);
         }
 
         // First check if the logs are in sync or not
@@ -324,8 +326,8 @@ public class Follower extends AbstractRaftActorBehavior {
             shouldCaptureSnapshot.compareAndSet(false,
                     context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));
 
-            if (entry.getData() instanceof ServerConfigurationPayload) {
-                context.updatePeerIds((ServerConfigurationPayload)entry.getData());
+            if (entry.getData() instanceof ServerConfigurationPayload serverConfiguration) {
+                context.updatePeerIds(serverConfiguration);
             }
         }
 
@@ -452,12 +454,11 @@ public class Follower extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (!(message instanceof RaftRPC)) {
+        if (!(message instanceof RaftRPC rpc)) {
             // The rest of the processing requires the message to be a RaftRPC
             return null;
         }
 
-        final RaftRPC rpc = (RaftRPC) message;
         // If RPC request or response contains term T > currentTerm:
         // set currentTerm = T, convert to follower (§5.1)
         // This applies to all RPC messages and responses
@@ -468,14 +469,14 @@ public class Follower extends AbstractRaftActorBehavior {
             context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
         }
 
-        if (rpc instanceof InstallSnapshot) {
-            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
+        if (rpc instanceof InstallSnapshot installSnapshot) {
+            handleInstallSnapshot(sender, installSnapshot);
             restartLastLeaderMessageTimer();
             scheduleElection(electionDuration());
             return this;
         }
 
-        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
+        if (!(rpc instanceof RequestVote requestVote) || canGrantVote(requestVote)) {
             restartLastLeaderMessageTimer();
             scheduleElection(electionDuration());
         }
@@ -548,7 +549,7 @@ public class Follower extends AbstractRaftActorBehavior {
 
         Address leaderAddress = leaderActor.anchorPath().address();
 
-        CurrentClusterState state = cluster.get().state();
+        CurrentClusterState state = cluster.orElseThrow().state();
         Set<Member> unreachable = state.getUnreachable();
 
         log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
@@ -586,7 +587,7 @@ public class Follower extends AbstractRaftActorBehavior {
             return false;
         }
 
-        final Cluster cluster = maybeCluster.get();
+        final Cluster cluster = maybeCluster.orElseThrow();
         final Member selfMember = cluster.selfMember();
 
         final CurrentClusterState state = cluster.state();
@@ -597,7 +598,7 @@ public class Follower extends AbstractRaftActorBehavior {
                         + "all members {} self member: {}", logName(), unreachable, members, selfMember);
 
         // no unreachable peers means we cannot be isolated
-        if (unreachable.size() == 0) {
+        if (unreachable.isEmpty()) {
             return false;
         }
 
@@ -607,11 +608,7 @@ public class Follower extends AbstractRaftActorBehavior {
         membersToCheck.removeAll(unreachable);
 
         // check if the only member not unreachable is us
-        if (membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember)) {
-            return true;
-        }
-
-        return false;
+        return membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember);
     }
 
     private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {
@@ -638,7 +635,7 @@ public class Follower extends AbstractRaftActorBehavior {
 
                 Snapshot snapshot = Snapshot.create(
                         context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
-                        new ArrayList<>(),
+                        List.of(),
                         installSnapshot.getLastIncludedIndex(),
                         installSnapshot.getLastIncludedTerm(),
                         installSnapshot.getLastIncludedIndex(),
@@ -672,8 +669,7 @@ public class Follower extends AbstractRaftActorBehavior {
         } catch (IOException e) {
             log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
 
-            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
-                    -1, false), actor());
+            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
 
             closeSnapshotTracker();
         }
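
The isolation hunk above boils the check down to a single predicate: after removing all unreachable members, a follower is isolated exactly when the only member left is itself. A self-contained sketch of that predicate with plain strings standing in for akka Members; the member names are illustrative:

    import java.util.HashSet;
    import java.util.Set;

    final class IsolationSketch {
        static boolean isIsolated(final String self, final Set<String> members, final Set<String> unreachable) {
            // No unreachable peers means we cannot be isolated.
            if (unreachable.isEmpty()) {
                return false;
            }
            final var reachable = new HashSet<>(members);
            reachable.removeAll(unreachable);
            // Isolated if the only member not unreachable is us.
            return reachable.size() == 1 && reachable.iterator().next().equals(self);
        }

        public static void main(final String[] args) {
            System.out.println(isIsolated("member-1", Set.of("member-1", "member-2", "member-3"),
                Set.of("member-2", "member-3")));
        }
    }
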
index 32c6da4b527431d9e874000e6dd0acaff696a9d5..2586f2091eec649afa80fbece5d3efe2ac999556 100644 (file)
@@ -7,10 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.behaviors;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.opendaylight.yangtools.util.AbstractStringIdentifier;
 
 /**
@@ -18,44 +14,16 @@ import org.opendaylight.yangtools.util.AbstractStringIdentifier;
  *
  * @author Thomas Pantelis
  */
-class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+final class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    FollowerIdentifier(String followerId) {
+    FollowerIdentifier(final String followerId) {
         super(followerId);
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private FollowerIdentifier identifier;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(FollowerIdentifier identifier) {
-            this.identifier = identifier;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(identifier.getValue());
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            identifier = new FollowerIdentifier((String) in.readObject());
-        }
-
-        private Object readResolve() {
-            return identifier;
-        }
+        return new FI(getValue());
     }
 }
index 3534ac5cf142eda058ceb1e39b90d311b096b20b..0e293520523c39c2e8a7e70f0d924373b90a67a5 100644 (file)
@@ -139,7 +139,7 @@ public class Leader extends AbstractLeader {
 
         final Optional<String> requestedFollowerIdOptional
                 = leadershipTransferContext.transferCohort.getRequestedFollowerId();
-        if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.get().equals(followerId)) {
+        if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.orElseThrow().equals(followerId)) {
             // we want to transfer leadership to specific follower
             return;
         }
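
Throughout the commit Optional.get() becomes orElseThrow(); the two have been equivalent since Java 10, but the latter names the failure mode (NoSuchElementException on an empty Optional). A small illustration; the follower ids are made up:

    import java.util.Optional;

    final class OptionalSketch {
        public static void main(final String[] args) {
            final Optional<String> requestedFollower = Optional.of("member-2");
            // Identical behaviour to get(), but the failure mode is explicit in the name.
            if (requestedFollower.isPresent() && !requestedFollower.orElseThrow().equals("member-1")) {
                System.out.println("transfer requested for " + requestedFollower.orElseThrow());
            }
        }
    }
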
index cc4caa32ebacc8511fd4ea610ea6af2af2e53bf4..a2617dc63960c676ccb7227a7922361f76568323 100644 (file)
@@ -38,9 +38,9 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     private final int snapshotChunkSize;
     private final String logName;
     private ByteSource snapshotBytes;
-    private int offset = INITIAL_OFFSET;
+    private long offset = INITIAL_OFFSET;
     // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
-    private int replyReceivedForOffset = -1;
+    private long replyReceivedForOffset = -1;
     // if replyStatus is false, the previous chunk is attempted
     private boolean replyStatus = false;
     private int chunkIndex = FIRST_CHUNK_INDEX;
@@ -49,7 +49,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     private int nextChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE;
     private long snapshotSize;
     private InputStream snapshotInputStream;
-    private Stopwatch chunkTimer = Stopwatch.createUnstarted();
+    private final Stopwatch chunkTimer = Stopwatch.createUnstarted();
     private byte[] currentChunk = null;
 
     LeaderInstallSnapshotState(final int snapshotChunkSize, final String logName) {
@@ -75,8 +75,8 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
         chunkIndex = FIRST_CHUNK_INDEX;
     }
 
-    int incrementOffset() {
-        // if offset is -1 doesnt matter whether it was the initial value or reset, move the offset to 0 to begin with
+    private long incrementOffset() {
+        // if offset is -1, it doesn't matter whether it was the initial value or a reset; move it to 0 to begin with
         if (offset == INITIAL_OFFSET) {
             offset = 0;
         } else {
@@ -139,7 +139,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     byte[] getNextChunk() throws IOException {
         // increment offset to indicate next chunk is in flight, canSendNextChunk() won't let us hit this again
         // until markSendStatus() is called with either success or failure
-        int start = incrementOffset();
+        final var start = incrementOffset();
         if (replyStatus || currentChunk == null) {
             int size = snapshotChunkSize;
             if (snapshotChunkSize > snapshotSize) {
@@ -149,11 +149,11 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
             }
 
             currentChunk = new byte[size];
-            int numRead = snapshotInputStream.read(currentChunk);
+            final var numRead = snapshotInputStream.read(currentChunk);
             if (numRead != size) {
                 throw new IOException(String.format(
-                        "The # of bytes read from the input stream, %d,"
-                                + "does not match the expected # %d", numRead, size));
+                        "The # of bytes read from the input stream, %d, does not match the expected # %d",
+                        numRead, size));
             }
 
             nextChunkHashCode = Arrays.hashCode(currentChunk);
@@ -183,7 +183,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
         try {
             snapshotInputStream = snapshotBytes.openStream();
         } catch (IOException e) {
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -198,7 +198,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
             try {
                 snapshotInputStream.close();
             } catch (IOException e) {
-                LOG.warn("{}: Error closing snapshot stream", logName);
+                LOG.warn("{}: Error closing snapshot stream", logName, e);
             }
 
             snapshotInputStream = null;
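
LeaderInstallSnapshotState slices the snapshot bytes into fixed-size chunks, records Arrays.hashCode() of the chunk it just sent, and only advances once the previous chunk is acknowledged. A simplified sketch of just the slicing and hashing; the 4-byte chunk size and the input string are illustrative, and the real class additionally tracks chunk indices and reply status:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    final class ChunkingSketch {
        // Split a snapshot into fixed-size chunks; the last chunk may be shorter.
        static List<byte[]> chunk(final byte[] snapshot, final int chunkSize) {
            final List<byte[]> chunks = new ArrayList<>();
            for (int offset = 0; offset < snapshot.length; offset += chunkSize) {
                chunks.add(Arrays.copyOfRange(snapshot, offset, Math.min(offset + chunkSize, snapshot.length)));
            }
            return chunks;
        }

        public static void main(final String[] args) {
            for (var c : chunk("snapshot-bytes".getBytes(), 4)) {
                // Arrays.hashCode() is what the sender remembers for the chunk it just produced.
                System.out.println(new String(c) + " hash=" + Arrays.hashCode(c));
            }
        }
    }
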
index 1538bed74cfbde6b225768633b20cdddb0279f85..c3b75161b05a2d314ebb65026fd6d86dfb753062 100644 (file)
@@ -48,11 +48,12 @@ class SnapshotTracker implements AutoCloseable {
      * @param maybeLastChunkHashCode the optional hash code of the previously received chunk
      * @return true if this is the last chunk
      * @throws InvalidChunkException if the chunk index is invalid or out of order
+     * @throws IOException if there is a problem writing to the stream
      */
     boolean addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt maybeLastChunkHashCode)
-            throws InvalidChunkException, IOException {
+            throws IOException {
         log.debug("addChunk: chunkIndex={}, lastChunkIndex={}, collectedChunks.size={}, lastChunkHashCode={}",
-                chunkIndex, lastChunkIndex, count, this.lastChunkHashCode);
+                chunkIndex, lastChunkIndex, count, lastChunkHashCode);
 
         if (sealed) {
             throw new InvalidChunkException("Invalid chunk received with chunkIndex " + chunkIndex
@@ -63,10 +64,10 @@ class SnapshotTracker implements AutoCloseable {
             throw new InvalidChunkException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex);
         }
 
-        if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.getAsInt() != this.lastChunkHashCode) {
+        if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.orElseThrow() != lastChunkHashCode) {
             throw new InvalidChunkException("The hash code of the recorded last chunk does not match "
-                    + "the senders hash code, expected " + this.lastChunkHashCode + " was "
-                    + maybeLastChunkHashCode.getAsInt());
+                    + "the senders hash code, expected " + lastChunkHashCode + " was "
+                    + maybeLastChunkHashCode.orElseThrow());
         }
 
         bufferedStream.write(chunk);
@@ -74,7 +75,7 @@ class SnapshotTracker implements AutoCloseable {
         count += chunk.length;
         sealed = chunkIndex == totalChunks;
         lastChunkIndex = chunkIndex;
-        this.lastChunkHashCode = Arrays.hashCode(chunk);
+        lastChunkHashCode = Arrays.hashCode(chunk);
         return sealed;
     }
 
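
On the receiving side, addChunk() insists on strictly increasing chunk indices and, when the sender supplies one, on the hash of the previously buffered chunk matching. A stripped-down sketch of that validation under assumed initial values mirroring the tracker's constants:

    import java.io.ByteArrayOutputStream;
    import java.util.Arrays;
    import java.util.OptionalInt;

    final class ChunkValidationSketch {
        private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        private int lastChunkIndex = 0;        // FIRST_CHUNK_INDEX - 1 in the real tracker
        private int lastChunkHashCode = -1;    // INITIAL_LAST_CHUNK_HASH_CODE in the real tracker

        void addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt maybeLastChunkHashCode) {
            if (lastChunkIndex + 1 != chunkIndex) {
                throw new IllegalStateException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex);
            }
            if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.orElseThrow() != lastChunkHashCode) {
                throw new IllegalStateException("Last chunk hash mismatch");
            }
            buffer.writeBytes(chunk);
            lastChunkIndex = chunkIndex;
            lastChunkHashCode = Arrays.hashCode(chunk);
        }

        public static void main(final String[] args) {
            final var tracker = new ChunkValidationSketch();
            tracker.addChunk(1, new byte[] { 1, 2 }, OptionalInt.empty());
            tracker.addChunk(2, new byte[] { 3 }, OptionalInt.of(Arrays.hashCode(new byte[] { 1, 2 })));
            System.out.println("buffered " + tracker.buffer.size() + " bytes");
        }
    }
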
index b670243b42f3167cc124f0b929d2ed9b8079e495..7fba245bf26fedd040e09c0b13dfc0cb425e2bcf 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.raft.client.messages;
 
 import akka.dispatch.ControlMessage;
 import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizableProxy;
 
 /**
  * Message sent to a raft actor to shutdown gracefully. If it's the leader it will transfer leadership to a
@@ -19,25 +18,18 @@ import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizabl
  * @author Thomas Pantelis
  */
 public final class Shutdown implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     public static final Shutdown INSTANCE = new Shutdown();
 
     private Shutdown() {
         // Hidden on purpose
     }
 
-    private Object writeReplace() {
-        return new Proxy();
-    }
-
-    private static class Proxy extends EmptyExternalizableProxy {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            super(INSTANCE);
-        }
+    @java.io.Serial
+    @SuppressWarnings("static-method")
+    private Object readResolve() {
+        return INSTANCE;
     }
 }
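
Shutdown now relies on readResolve() alone to preserve its singleton across serialization, dropping the dedicated proxy class. The same pattern in a minimal stand-alone form:

    import java.io.Serializable;

    final class SingletonMessage implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        static final SingletonMessage INSTANCE = new SingletonMessage();

        private SingletonMessage() {
            // Hidden on purpose
        }

        @java.io.Serial
        private Object readResolve() {
            // Whatever the stream produced, hand back the canonical instance.
            return INSTANCE;
        }
    }
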
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java
new file mode 100644 (file)
index 0000000..491ca3f
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Argon serialization proxy for {@link AppendEntries}.
+ */
+final class AE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private AppendEntries appendEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AE() {
+        // For Externalizable
+    }
+
+    AE(final AppendEntries appendEntries) {
+        this.appendEntries = requireNonNull(appendEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeShort(appendEntries.getLeaderRaftVersion());
+        WritableObjects.writeLong(out, appendEntries.getTerm());
+        out.writeObject(appendEntries.getLeaderId());
+
+        WritableObjects.writeLongs(out, appendEntries.getPrevLogTerm(), appendEntries.getPrevLogIndex());
+        WritableObjects.writeLongs(out, appendEntries.getLeaderCommit(), appendEntries.getReplicatedToAllIndex());
+
+        out.writeShort(appendEntries.getPayloadVersion());
+
+        final var entries = appendEntries.getEntries();
+        out.writeInt(entries.size());
+        for (var e : entries) {
+            WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+            out.writeObject(e.getData());
+        }
+
+        out.writeObject(appendEntries.leaderAddress());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        short leaderRaftVersion = in.readShort();
+        long term = WritableObjects.readLong(in);
+        String leaderId = (String) in.readObject();
+
+        byte hdr = WritableObjects.readLongHeader(in);
+        long prevLogTerm = WritableObjects.readFirstLong(in, hdr);
+        long prevLogIndex = WritableObjects.readSecondLong(in, hdr);
+
+        hdr = WritableObjects.readLongHeader(in);
+        long leaderCommit = WritableObjects.readFirstLong(in, hdr);
+        long replicatedToAllIndex = WritableObjects.readSecondLong(in, hdr);
+        short payloadVersion = in.readShort();
+
+        int size = in.readInt();
+        var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+        for (int i = 0; i < size; i++) {
+            hdr = WritableObjects.readLongHeader(in);
+            entries.add(new SimpleReplicatedLogEntry(WritableObjects.readFirstLong(in, hdr),
+                WritableObjects.readSecondLong(in, hdr), (Payload) in.readObject()));
+        }
+
+        String leaderAddress = (String)in.readObject();
+
+        appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
+                replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
+                leaderAddress);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(appendEntries);
+    }
+}
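
The AE proxy packs related longs (term/index pairs) with yangtools' WritableObjects, which writes a small header describing how many bytes each value actually needs instead of fixed 8-byte longs. A hedged sketch of the write/read pairing in isolation, assuming org.opendaylight.yangtools.concepts.WritableObjects is on the classpath; the 42/7 values are arbitrary:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class WritableObjectsSketch {
        public static void main(final String[] args) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // Two longs share a single header byte describing how many bytes each value needs.
                WritableObjects.writeLongs(out, 42 /* prevLogTerm */, 7 /* prevLogIndex */);
            }

            try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                final byte hdr = WritableObjects.readLongHeader(in);
                System.out.println(WritableObjects.readFirstLong(in, hdr) + "/" + WritableObjects.readSecondLong(in, hdr));
            }
        }
    }
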
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java
new file mode 100644 (file)
index 0000000..6aa2ed8
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link AppendEntriesReply}.
+ */
+final class AR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flag bits
+    private static final int SUCCESS                = 0x10;
+    private static final int FORCE_INSTALL_SNAPSHOT = 0x20;
+    private static final int NEEDS_LEADER_ADDRESS   = 0x40;
+
+    private AppendEntriesReply appendEntriesReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AR() {
+        // For Externalizable
+    }
+
+    AR(final AppendEntriesReply appendEntriesReply) {
+        this.appendEntriesReply = requireNonNull(appendEntriesReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeShort(appendEntriesReply.getRaftVersion());
+
+        int flags = 0;
+        if (appendEntriesReply.isSuccess()) {
+            flags |= SUCCESS;
+        }
+        if (appendEntriesReply.isForceInstallSnapshot()) {
+            flags |= FORCE_INSTALL_SNAPSHOT;
+        }
+        if (appendEntriesReply.isNeedsLeaderAddress()) {
+            flags |= NEEDS_LEADER_ADDRESS;
+        }
+        WritableObjects.writeLong(out, appendEntriesReply.getTerm(), flags);
+
+        out.writeObject(appendEntriesReply.getFollowerId());
+
+        WritableObjects.writeLongs(out, appendEntriesReply.getLogLastIndex(), appendEntriesReply.getLogLastTerm());
+
+        out.writeShort(appendEntriesReply.getPayloadVersion());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        short raftVersion = in.readShort();
+
+        byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String followerId = (String) in.readObject();
+
+        hdr = WritableObjects.readLongHeader(in);
+        long logLastIndex = WritableObjects.readFirstLong(in, hdr);
+        long logLastTerm = WritableObjects.readSecondLong(in, hdr);
+
+        short payloadVersion = in.readShort();
+
+        appendEntriesReply = new AppendEntriesReply(followerId, term, getFlag(flags, SUCCESS), logLastIndex,
+            logLastTerm, payloadVersion, getFlag(flags, FORCE_INSTALL_SNAPSHOT), getFlag(flags, NEEDS_LEADER_ADDRESS),
+            raftVersion, RaftVersions.CURRENT_VERSION);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(appendEntriesReply);
+    }
+
+    private static boolean getFlag(final int flags, final int bit) {
+        return (flags & bit) != 0;
+    }
+}
index 76edc54990c0c6a94de31be56adcc6b600880d79..038ad48b8e4aa0cae2778a3e0fce110a55d44a6f 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.raft.messages;
 
 public abstract class AbstractRaftRPC implements RaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -6061342433962854822L;
 
     // term
@@ -23,5 +24,6 @@ public abstract class AbstractRaftRPC implements RaftRPC {
     }
 
     // All implementations must use Externalizable Proxy pattern
+    @java.io.Serial
     abstract Object writeReplace();
 }
index 6bdb7a499e0b22185c5f28c9f7d3f3e47da32a92..892ea3356a58259b5a91b6ab3ea7bcfda8ef70b9 100644 (file)
@@ -10,24 +10,23 @@ package org.opendaylight.controller.cluster.raft.messages;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.util.ArrayList;
 import java.util.List;
-import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
  * Invoked by leader to replicate log entries (§5.3); also used as heartbeat (§5.2).
  */
 public final class AppendEntries extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     // So that follower can redirect clients
@@ -56,7 +55,7 @@ public final class AppendEntries extends AbstractRaftRPC {
 
     private final String leaderAddress;
 
-    private AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex,
+    AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex,
             final long prevLogTerm, @NonNull final List<ReplicatedLogEntry> entries, final long leaderCommit,
             final long replicatedToAllIndex, final short payloadVersion, final short recipientRaftVersion,
             final short leaderRaftVersion, @Nullable final String leaderAddress) {
@@ -117,8 +116,8 @@ public final class AppendEntries extends AbstractRaftRPC {
         return payloadVersion;
     }
 
-    public Optional<String> getLeaderAddress() {
-        return Optional.ofNullable(leaderAddress);
+    public @Nullable String leaderAddress() {
+        return leaderAddress;
     }
 
     public short getLeaderRaftVersion() {
@@ -141,13 +140,14 @@ public final class AppendEntries extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new Proxy(this);
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new AE(this);
     }
 
     /**
      * Fluorine version that adds the leader address.
      */
     private static class ProxyV2 implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private AppendEntries appendEntries;
@@ -195,80 +195,19 @@ public final class AppendEntries extends AbstractRaftRPC {
             short payloadVersion = in.readShort();
 
             int size = in.readInt();
-            List<ReplicatedLogEntry> entries = new ArrayList<>(size);
+            var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
             for (int i = 0; i < size; i++) {
                 entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
             }
 
             String leaderAddress = (String)in.readObject();
 
-            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
+            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
                     replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
                     leaderAddress);
         }
 
-        private Object readResolve() {
-            return appendEntries;
-        }
-    }
-
-    /**
-     * Pre-Fluorine version.
-     */
-    @Deprecated
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private AppendEntries appendEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final AppendEntries appendEntries) {
-            this.appendEntries = appendEntries;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(appendEntries.getTerm());
-            out.writeObject(appendEntries.leaderId);
-            out.writeLong(appendEntries.prevLogTerm);
-            out.writeLong(appendEntries.prevLogIndex);
-            out.writeLong(appendEntries.leaderCommit);
-            out.writeLong(appendEntries.replicatedToAllIndex);
-            out.writeShort(appendEntries.payloadVersion);
-
-            out.writeInt(appendEntries.entries.size());
-            for (ReplicatedLogEntry e: appendEntries.entries) {
-                out.writeLong(e.getIndex());
-                out.writeLong(e.getTerm());
-                out.writeObject(e.getData());
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String leaderId = (String) in.readObject();
-            long prevLogTerm = in.readLong();
-            long prevLogIndex = in.readLong();
-            long leaderCommit = in.readLong();
-            long replicatedToAllIndex = in.readLong();
-            short payloadVersion = in.readShort();
-
-            int size = in.readInt();
-            List<ReplicatedLogEntry> entries = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
-            }
-
-            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
-                replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, RaftVersions.BORON_VERSION, null);
-        }
-
+        @java.io.Serial
         private Object readResolve() {
             return appendEntries;
         }
index ef2469790b00650594d420769cc6a9410ff2ce23..033a19a7b26e758a30430b902085649af4c886ef 100644 (file)
@@ -18,6 +18,7 @@ import org.opendaylight.controller.cluster.raft.RaftVersions;
  * Reply for the AppendEntries message.
  */
 public final class AppendEntriesReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -7487547356392536683L;
 
     // true if follower contained entry matching
@@ -59,7 +60,7 @@ public final class AppendEntriesReply extends AbstractRaftRPC {
                 needsLeaderAddress, RaftVersions.CURRENT_VERSION, recipientRaftVersion);
     }
 
-    private AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
+    AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
             final long logLastTerm, final short payloadVersion, final boolean forceInstallSnapshot,
             final boolean needsLeaderAddress, final short raftVersion, final short recipientRaftVersion) {
         super(term);
@@ -117,13 +118,14 @@ public final class AppendEntriesReply extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new Proxy(this);
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new AR(this);
     }
 
     /**
      * Fluorine version that adds the needsLeaderAddress flag.
      */
     private static class Proxy2 implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private AppendEntriesReply appendEntriesReply;
@@ -168,57 +170,7 @@ public final class AppendEntriesReply extends AbstractRaftRPC {
                     RaftVersions.CURRENT_VERSION);
         }
 
-        private Object readResolve() {
-            return appendEntriesReply;
-        }
-    }
-
-    /**
-     * Pre-Fluorine version.
-     */
-    @Deprecated
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private AppendEntriesReply appendEntriesReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final AppendEntriesReply appendEntriesReply) {
-            this.appendEntriesReply = appendEntriesReply;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeShort(appendEntriesReply.raftVersion);
-            out.writeLong(appendEntriesReply.getTerm());
-            out.writeObject(appendEntriesReply.followerId);
-            out.writeBoolean(appendEntriesReply.success);
-            out.writeLong(appendEntriesReply.logLastIndex);
-            out.writeLong(appendEntriesReply.logLastTerm);
-            out.writeShort(appendEntriesReply.payloadVersion);
-            out.writeBoolean(appendEntriesReply.forceInstallSnapshot);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            short raftVersion = in.readShort();
-            long term = in.readLong();
-            String followerId = (String) in.readObject();
-            boolean success = in.readBoolean();
-            long logLastIndex = in.readLong();
-            long logLastTerm = in.readLong();
-            short payloadVersion = in.readShort();
-            boolean forceInstallSnapshot = in.readBoolean();
-
-            appendEntriesReply = new AppendEntriesReply(followerId, term, success, logLastIndex, logLastTerm,
-                    payloadVersion, forceInstallSnapshot, false, raftVersion, RaftVersions.CURRENT_VERSION);
-        }
-
+        @java.io.Serial
         private Object readResolve() {
             return appendEntriesReply;
         }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java
new file mode 100644 (file)
index 0000000..e9d95d8
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshotReply}.
+ */
+final class IR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int SUCCESS = 0x10;
+
+    private InstallSnapshotReply installSnapshotReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public IR() {
+        // For Externalizable
+    }
+
+    IR(final InstallSnapshotReply installSnapshotReply) {
+        this.installSnapshotReply = requireNonNull(installSnapshotReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, installSnapshotReply.getTerm(), installSnapshotReply.isSuccess() ? SUCCESS : 0);
+        out.writeObject(installSnapshotReply.getFollowerId());
+        out.writeInt(installSnapshotReply.getChunkIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String followerId = (String) in.readObject();
+        int chunkIndex = in.readInt();
+
+        installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, (flags & SUCCESS) != 0);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(installSnapshotReply);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java
new file mode 100644 (file)
index 0000000..3247bb2
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshot}.
+ */
+final class IS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int LAST_CHUNK_HASHCODE = 0x10;
+    private static final int SERVER_CONFIG       = 0x20;
+
+    private InstallSnapshot installSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public IS() {
+        // For Externalizable
+    }
+
+    IS(final InstallSnapshot installSnapshot) {
+        this.installSnapshot = requireNonNull(installSnapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        int flags = 0;
+        final var lastChunkHashCode = installSnapshot.getLastChunkHashCode();
+        if (lastChunkHashCode.isPresent()) {
+            flags |= LAST_CHUNK_HASHCODE;
+        }
+        final var serverConfig = installSnapshot.getServerConfig();
+        if (serverConfig.isPresent()) {
+            flags |= SERVER_CONFIG;
+        }
+
+        WritableObjects.writeLong(out, installSnapshot.getTerm(), flags);
+        out.writeObject(installSnapshot.getLeaderId());
+        WritableObjects.writeLongs(out, installSnapshot.getLastIncludedIndex(), installSnapshot.getLastIncludedTerm());
+        out.writeInt(installSnapshot.getChunkIndex());
+        out.writeInt(installSnapshot.getTotalChunks());
+
+        if (lastChunkHashCode.isPresent()) {
+            out.writeInt(lastChunkHashCode.orElseThrow());
+        }
+        if (serverConfig.isPresent()) {
+            out.writeObject(serverConfig.orElseThrow());
+        }
+
+        out.writeObject(installSnapshot.getData());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String leaderId = (String) in.readObject();
+
+        hdr = WritableObjects.readLongHeader(in);
+        long lastIncludedIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastIncludedTerm = WritableObjects.readSecondLong(in, hdr);
+        int chunkIndex = in.readInt();
+        int totalChunks = in.readInt();
+
+        OptionalInt lastChunkHashCode = getFlag(flags, LAST_CHUNK_HASHCODE) ? OptionalInt.of(in.readInt())
+            : OptionalInt.empty();
+        Optional<ServerConfigurationPayload> serverConfig = getFlag(flags, SERVER_CONFIG)
+                ? Optional.of((ServerConfigurationPayload)in.readObject()) : Optional.empty();
+
+        byte[] data = (byte[])in.readObject();
+
+        installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
+                chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(installSnapshot);
+    }
+
+    private static boolean getFlag(final int flags, final int bit) {
+        return (flags & bit) != 0;
+    }
+}
+
@@ -5,11 +5,12 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+package org.opendaylight.controller.cluster.raft.messages;
 
 import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.concepts.Identifier;
 
 public abstract class IdentifiablePayload<T extends Identifier> extends Payload implements Identifiable<T> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
 }
index 60c54f7fd01f03996f166ef562ff251bd34c4d56..3cd470f6cc6d3c7186dc4fb20324482b216153d7 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.raft.messages;
 
+import com.google.common.annotations.VisibleForTesting;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
@@ -14,12 +15,14 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.util.Optional;
 import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 
 /**
  * Message sent from a leader to install a snapshot chunk on a follower.
  */
 public final class InstallSnapshot extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final String leaderId;
@@ -32,13 +35,16 @@ public final class InstallSnapshot extends AbstractRaftRPC {
     private final OptionalInt lastChunkHashCode;
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via writeReplace()")
     private final Optional<ServerConfigurationPayload> serverConfig;
+    private final short recipientRaftVersion;
 
-    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] "
-            + "object but this is OK since this class is merely a DTO and does not process byte[] internally. "
-            + "Also it would be inefficient to create a copy as the byte[] could be large.")
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = """
+        Stores a reference to an externally mutable byte[] object but this is OK since this class is merely a DTO and \
+        does not process byte[] internally. Also it would be inefficient to create a copy as the byte[] could be \
+        large.""")
     public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
             final long lastIncludedTerm, final byte[] data, final int chunkIndex, final int totalChunks,
-            final OptionalInt lastChunkHashCode, final Optional<ServerConfigurationPayload> serverConfig) {
+            final OptionalInt lastChunkHashCode, final Optional<ServerConfigurationPayload> serverConfig,
+            final short recipientRaftVersion) {
         super(term);
         this.leaderId = leaderId;
         this.lastIncludedIndex = lastIncludedIndex;
@@ -48,13 +54,15 @@ public final class InstallSnapshot extends AbstractRaftRPC {
         this.totalChunks = totalChunks;
         this.lastChunkHashCode = lastChunkHashCode;
         this.serverConfig = serverConfig;
+        this.recipientRaftVersion = recipientRaftVersion;
     }
 
+    @VisibleForTesting
     public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
                            final long lastIncludedTerm, final byte[] data, final int chunkIndex,
                            final int totalChunks) {
         this(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, chunkIndex, totalChunks, OptionalInt.empty(),
-            Optional.empty());
+            Optional.empty(), RaftVersions.CURRENT_VERSION);
     }
 
     public String getLeaderId() {
@@ -69,9 +77,10 @@ public final class InstallSnapshot extends AbstractRaftRPC {
         return lastIncludedTerm;
     }
 
-    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
-            + "this is OK since this class is merely a DTO and does not process the byte[] internally. "
-            + "Also it would be inefficient to create a return copy as the byte[] could be large.")
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = """
+        Exposes a mutable object stored in a field but this is OK since this class is merely a DTO and does not \
+        process the byte[] internally. Also it would be inefficient to create a return copy as the byte[] could be \
+        large.""")
     public byte[] getData() {
         return data;
     }
@@ -92,10 +101,6 @@ public final class InstallSnapshot extends AbstractRaftRPC {
         return serverConfig;
     }
 
-    public <T> Object toSerializable(final short version) {
-        return this;
-    }
-
     @Override
     public String toString() {
         return "InstallSnapshot [term=" + getTerm() + ", leaderId=" + leaderId + ", lastIncludedIndex="
@@ -106,10 +111,11 @@ public final class InstallSnapshot extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return new Proxy(this);
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy(this) : new IS(this);
     }
 
     private static class Proxy implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private InstallSnapshot installSnapshot;
@@ -135,12 +141,12 @@ public final class InstallSnapshot extends AbstractRaftRPC {
 
             out.writeByte(installSnapshot.lastChunkHashCode.isPresent() ? 1 : 0);
             if (installSnapshot.lastChunkHashCode.isPresent()) {
-                out.writeInt(installSnapshot.lastChunkHashCode.getAsInt());
+                out.writeInt(installSnapshot.lastChunkHashCode.orElseThrow());
             }
 
             out.writeByte(installSnapshot.serverConfig.isPresent() ? 1 : 0);
             if (installSnapshot.serverConfig.isPresent()) {
-                out.writeObject(installSnapshot.serverConfig.get());
+                out.writeObject(installSnapshot.serverConfig.orElseThrow());
             }
 
             out.writeObject(installSnapshot.data);
@@ -162,9 +168,10 @@ public final class InstallSnapshot extends AbstractRaftRPC {
             byte[] data = (byte[])in.readObject();
 
             installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
-                    chunkIndex, totalChunks, lastChunkHashCode, serverConfig);
+                    chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
         }
 
+        @java.io.Serial
         private Object readResolve() {
             return installSnapshot;
         }
index 693fe7e7b8d5eb96d046eebb92e446ff9acf0b60..ed8b2800816f29a42c46b34adfa1aa5ca3004067 100644 (file)
@@ -7,12 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
 public final class InstallSnapshotReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 642227896390779503L;
 
     // The followerId - this will be used to figure out which follower is
@@ -50,44 +46,6 @@ public final class InstallSnapshotReply extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private InstallSnapshotReply installSnapshotReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final InstallSnapshotReply installSnapshotReply) {
-            this.installSnapshotReply = installSnapshotReply;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(installSnapshotReply.getTerm());
-            out.writeObject(installSnapshotReply.followerId);
-            out.writeInt(installSnapshotReply.chunkIndex);
-            out.writeBoolean(installSnapshotReply.success);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String followerId = (String) in.readObject();
-            int chunkIndex = in.readInt();
-            boolean success = in.readBoolean();
-
-            installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, success);
-        }
-
-        private Object readResolve() {
-            return installSnapshotReply;
-        }
+        return new IR(this);
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java
new file mode 100644 (file)
index 0000000..c75385a
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import java.io.Serializable;
+
+/**
+ * An instance of a {@link Payload} class is meant to be used as the Payload for {@link AppendEntries}.
+ *
+ * <p>
+ * When an actor derived from RaftActor invokes persistData, it must pass an instance of the Payload class.
+ * Similarly, when state needs to be applied to the derived RaftActor, it will be passed an instance of the Payload class.
+ */
+public abstract class Payload implements Serializable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Return the estimate of in-memory size of this payload.
+     *
+     * @return An estimate of the in-memory size of this payload.
+     */
+    public abstract int size();
+
+    /**
+     * Return the estimate of the serialized size of this payload when passed through serialization. The estimate
+     * needs to be reasonably accurate and should err on the side of caution, reporting a slightly higher size in the
+     * face of uncertainty.
+     *
+     * @return An estimate of serialized size.
+     */
+    public abstract int serializedSize();
+
+    /**
+     * Return the serialization proxy for this object.
+     *
+     * @return Serialization proxy
+     */
+    @java.io.Serial
+    protected abstract Object writeReplace();
+}
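
A minimal sketch of a concrete Payload under the contract declared above, assuming it lives in the same package as Payload; ExamplePayload and its EP proxy are hypothetical names, not part of this change.

    import java.io.Serializable;
    import java.nio.charset.StandardCharsets;

    // Hypothetical Payload implementation: size() reports the in-memory footprint,
    // serializedSize() errs on the high side, writeReplace() hands out a proxy.
    final class ExamplePayload extends Payload {
        private static final long serialVersionUID = 1L;

        private final byte[] bytes;

        ExamplePayload(final String text) {
            bytes = text.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public int size() {
            return bytes.length;
        }

        @Override
        public int serializedSize() {
            // Conservative: payload bytes plus a rough allowance for stream overhead.
            return bytes.length + 64;
        }

        @Override
        protected Object writeReplace() {
            return new EP(bytes.clone());
        }

        // Serialization proxy, in the same spirit as the RV/VR/IS proxies in this change.
        private record EP(byte[] data) implements Serializable {
            private Object readResolve() {
                return new ExamplePayload(new String(data, StandardCharsets.UTF_8));
            }
        }
    }
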
@@ -5,7 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+package org.opendaylight.controller.cluster.raft.messages;
 
 /**
  * This is a tagging interface for a Payload implementation that needs to always be persisted regardless of
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java
new file mode 100644 (file)
index 0000000..b75f1b7
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVote}.
+ */
+final class RV implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private RequestVote requestVote;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RV() {
+        // For Externalizable
+    }
+
+    RV(final RequestVote requestVote) {
+        this.requestVote = requireNonNull(requestVote);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, requestVote.getTerm());
+        out.writeObject(requestVote.getCandidateId());
+        WritableObjects.writeLongs(out, requestVote.getLastLogIndex(), requestVote.getLastLogTerm());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        long term = WritableObjects.readLong(in);
+        String candidateId = (String) in.readObject();
+
+        final byte hdr = WritableObjects.readLongHeader(in);
+        long lastLogIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastLogTerm = WritableObjects.readSecondLong(in, hdr);
+
+        requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return requestVote;
+    }
+}
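
The RV proxy above follows the usual writeReplace()/readResolve() dance: Java serialization writes the compact RV form and hands back a RequestVote on the way in. A hedged usage sketch, assuming RequestVote exposes a public (term, candidateId, lastLogIndex, lastLogTerm) constructor and public getters, as the removed Proxy code below suggests:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.opendaylight.controller.cluster.raft.messages.RequestVote;

    public final class RequestVoteRoundTrip {
        private RequestVoteRoundTrip() {
        }

        public static void main(final String[] args) throws Exception {
            final var original = new RequestVote(5, "member-1", 42, 4);

            // Serialization swaps in RV via RequestVote.writeReplace() ...
            final var bos = new ByteArrayOutputStream();
            try (var oos = new ObjectOutputStream(bos)) {
                oos.writeObject(original);
            }

            // ... and RV.readResolve() hands back a RequestVote on the way in.
            final RequestVote copy;
            try (var ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                copy = (RequestVote) ois.readObject();
            }

            System.out.println(copy.getTerm() == original.getTerm()
                && copy.getCandidateId().equals(original.getCandidateId()));
        }
    }
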
index b23c76d06f9213d516f0f2a7345f85034c157333..2b33a12950620accf369300b23074710c8b5b3c2 100644 (file)
@@ -7,15 +7,11 @@
  */
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
 /**
  * Invoked by candidates to gather votes (§5.2).
  */
 public final class RequestVote extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -6967509186297108657L;
 
     // candidate requesting vote
@@ -57,44 +53,6 @@ public final class RequestVote extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private RequestVote requestVote;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final RequestVote requestVote) {
-            this.requestVote = requestVote;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(requestVote.getTerm());
-            out.writeObject(requestVote.candidateId);
-            out.writeLong(requestVote.lastLogIndex);
-            out.writeLong(requestVote.lastLogTerm);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String candidateId = (String) in.readObject();
-            long lastLogIndex = in.readLong();
-            long lastLogTerm = in.readLong();
-
-            requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
-        }
-
-        private Object readResolve() {
-            return requestVote;
-        }
+        return new RV(this);
     }
 }
index 2554c17fd85fd1c7dcfc04c824079c1510ed98ee..01fd9abe2e1266572122e155630e4eca82cfb4d1 100644 (file)
@@ -7,12 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
 public final class RequestVoteReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 8427899326488775660L;
 
     // true means candidate received vote
@@ -34,40 +30,6 @@ public final class RequestVoteReply extends AbstractRaftRPC {
 
     @Override
     Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private RequestVoteReply requestVoteReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final RequestVoteReply requestVoteReply) {
-            this.requestVoteReply = requestVoteReply;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(requestVoteReply.getTerm());
-            out.writeBoolean(requestVoteReply.voteGranted);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            long term = in.readLong();
-            boolean voteGranted = in.readBoolean();
-
-            requestVoteReply = new RequestVoteReply(term, voteGranted);
-        }
-
-        private Object readResolve() {
-            return requestVoteReply;
-        }
+        return new VR(this);
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java
new file mode 100644 (file)
index 0000000..d5a489b
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVoteReply}.
+ */
+final class VR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int VOTE_GRANTED = 0x10;
+
+    private RequestVoteReply requestVoteReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public VR() {
+        // For Externalizable
+    }
+
+    VR(final RequestVoteReply requestVoteReply) {
+        this.requestVoteReply = requireNonNull(requestVoteReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, requestVoteReply.getTerm(), requestVoteReply.isVoteGranted() ? VOTE_GRANTED : 0);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        requestVoteReply = new RequestVoteReply(WritableObjects.readLongBody(in, hdr),
+            (WritableObjects.longHeaderFlags(hdr) & VOTE_GRANTED) != 0);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(requestVoteReply);
+    }
+}
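
VR above packs the voteGranted bit into the otherwise-unused flag bits of the WritableObjects long header, so the whole reply costs one header byte plus a minimal-width term. A small sketch of the same trick in isolation; TermAndFlag and its Decoded record are hypothetical names:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class TermAndFlag {
        // One of the header bits left free by WritableObjects, as in VR.VOTE_GRANTED.
        private static final int FLAG = 0x10;

        record Decoded(long term, boolean granted) {
        }

        private TermAndFlag() {
        }

        static void write(final DataOutput out, final long term, final boolean granted) throws IOException {
            WritableObjects.writeLong(out, term, granted ? FLAG : 0);
        }

        static Decoded read(final DataInput in) throws IOException {
            // The header carries both the body width and our extra flag bit.
            final byte hdr = WritableObjects.readLongHeader(in);
            return new Decoded(WritableObjects.readLongBody(in, hdr),
                (WritableObjects.longHeaderFlags(hdr) & FLAG) != 0);
        }
    }
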
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java
new file mode 100644 (file)
index 0000000..4e39e98
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ApplyJournalEntries}.
+ */
+final class AJE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ApplyJournalEntries applyEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AJE() {
+        // For Externalizable
+    }
+
+    AJE(final ApplyJournalEntries applyEntries) {
+        this.applyEntries = requireNonNull(applyEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, applyEntries.getToIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        applyEntries = new ApplyJournalEntries(WritableObjects.readLong(in));
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(applyEntries);
+    }
+}
index 3c0a8ac700c7de8e4e1fd156eb07ffbf0761d614..30da667c2695c666f04eaae0d3f2a216772fe125 100644 (file)
@@ -8,10 +8,6 @@
 package org.opendaylight.controller.cluster.raft.persisted;
 
 import akka.dispatch.ControlMessage;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
@@ -22,38 +18,8 @@ import java.io.Serializable;
  *
  * @author Thomas Pantelis
  */
-public class ApplyJournalEntries implements Serializable, ControlMessage {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ApplyJournalEntries applyEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ApplyJournalEntries applyEntries) {
-            this.applyEntries = applyEntries;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(applyEntries.toIndex);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            applyEntries = new ApplyJournalEntries(in.readLong());
-        }
-
-        private Object readResolve() {
-            return applyEntries;
-        }
-    }
-
+public final class ApplyJournalEntries implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long toIndex;
@@ -66,12 +32,13 @@ public class ApplyJournalEntries implements Serializable, ControlMessage {
         return toIndex;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "ApplyJournalEntries [toIndex=" + toIndex + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new AJE(this);
+    }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java
new file mode 100644 (file)
index 0000000..6bd34c2
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link DeleteEntries}.
+ */
+final class DE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DeleteEntries deleteEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DE() {
+        // For Externalizable
+    }
+
+    DE(final DeleteEntries deleteEntries) {
+        this.deleteEntries = requireNonNull(deleteEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, deleteEntries.getFromIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        deleteEntries = new DeleteEntries(WritableObjects.readLong(in));
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(deleteEntries);
+    }
+}
index 57f5af3a034e90d31f716eea1da38626832f593f..8b4eb8388af80799ffd77dc439aca9c10a29cac6 100644 (file)
@@ -7,10 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
@@ -18,38 +14,8 @@ import java.io.Serializable;
  *
  * @author Thomas Pantelis
  */
-public class DeleteEntries implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private DeleteEntries deleteEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final DeleteEntries deleteEntries) {
-            this.deleteEntries = deleteEntries;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(deleteEntries.fromIndex);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            deleteEntries = new DeleteEntries(in.readLong());
-        }
-
-        private Object readResolve() {
-            return deleteEntries;
-        }
-    }
-
+public final class DeleteEntries implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long fromIndex;
@@ -62,12 +28,13 @@ public class DeleteEntries implements Serializable {
         return fromIndex;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "DeleteEntries [fromIndex=" + fromIndex + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new DE(this);
+    }
 }
index aee90ace41e9907d2823ac842ebc96cea8e77b30..9939e2f2b09b3201159a7cd4ada8fd7a4d990789 100644 (file)
@@ -13,13 +13,16 @@ package org.opendaylight.controller.cluster.raft.persisted;
  * @author Thomas Pantelis
  */
 public final class EmptyState implements Snapshot.State {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public static final EmptyState INSTANCE = new EmptyState();
 
     private EmptyState() {
+        // Hidden on purpose
     }
 
+    @java.io.Serial
     @SuppressWarnings("static-method")
     private Object readResolve() {
         return INSTANCE;
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java
new file mode 100644 (file)
index 0000000..7e609ab
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link SimpleReplicatedLogEntry}.
+ */
+final class LE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private long index;
+    private long term;
+    private Payload data;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public LE() {
+        // For Externalizable
+    }
+
+    // For size estimation only, use full bit size
+    LE(final Void dummy) {
+        index = Long.MIN_VALUE;
+        term = Long.MIN_VALUE;
+        data = null;
+    }
+
+    LE(final SimpleReplicatedLogEntry logEntry) {
+        index = logEntry.getIndex();
+        term = logEntry.getTerm();
+        data = logEntry.getData();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLongs(out, index, term);
+        out.writeObject(data);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        index = WritableObjects.readFirstLong(in, hdr);
+        term = WritableObjects.readSecondLong(in, hdr);
+        data = (Payload) in.readObject();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new SimpleReplicatedLogEntry(index, term, data);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java
new file mode 100644 (file)
index 0000000..0e75d88
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+/**
+ * Marker interface for serializable objects which have been migrated. It implements {@link MigratedSerializable} and
+ * always returns {@code true} from {@link #isMigrated()}. This interface is marked as deprecated, as any of its users
+ * should also be marked as deprecated.
+ */
+@Deprecated
+public interface LegacySerializable extends MigratedSerializable {
+    @Override
+    @Deprecated(forRemoval = true)
+    default boolean isMigrated() {
+        return true;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java
new file mode 100644 (file)
index 0000000..a041f2f
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Serializable;
+
+/**
+ * Serialization proxy for {@link NoopPayload}.
+ */
+// There is no need for Externalizable
+final class NP implements Serializable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    @java.io.Serial
+    private Object readResolve() {
+        return NoopPayload.INSTANCE;
+    }
+}
+
index 46628c6078408445662942029b2f8df004aab2bd..0f076c55d755a3286433d388aad2e33faeef1fdb 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.raft.persisted;
 
 import akka.dispatch.ControlMessage;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * Payload used for no-op log entries that are put into the journal by the PreLeader in order to commit
@@ -17,22 +18,17 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa
  *
  * @author Thomas Pantelis
  */
-public final class NoopPayload extends Payload implements Serializable, ControlMessage {
-    public static final NoopPayload INSTANCE = new NoopPayload();
-
-    // There is no need for Externalizable
-    private static final class Proxy implements Serializable {
-        private static final long serialVersionUID = 1L;
-
-        private Object readResolve() {
-            return INSTANCE;
-        }
-    }
-
+public final class NoopPayload extends Payload implements ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private static final Proxy PROXY = new Proxy();
+    private static final @NonNull NP PROXY = new NP();
+    // Estimate of how big the proxy is. Note this includes object stream overhead, so it is a bit conservative.
+    private static final int PROXY_SIZE = SerializationUtils.serialize(PROXY).length;
+
+    public static final @NonNull NoopPayload INSTANCE = new NoopPayload();
 
     private NoopPayload() {
+        // Hidden on purpose
     }
 
     @Override
@@ -40,7 +36,13 @@ public final class NoopPayload extends Payload implements Serializable, ControlM
         return 0;
     }
 
-    private Object writeReplace() {
+    @Override
+    public int serializedSize() {
+        return PROXY_SIZE;
+    }
+
+    @Override
+    protected Object writeReplace() {
         return PROXY;
     }
 }
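
The PROXY_SIZE constant above measures the proxy's real serialized length once, at class-initialization time, rather than hard-coding a guess. A hypothetical stand-alone illustration of the same idea (names are not from the patch):

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    final class SerializedSizeDemo {
        // Any cheap Serializable stand-in works; the measured length already includes the
        // object-stream preamble, so the resulting estimate is slightly conservative.
        private static final class EmptyProxy implements Serializable {
            private static final long serialVersionUID = 1L;
        }

        static final int PROXY_SIZE = SerializationUtils.serialize(new EmptyProxy()).length;

        private SerializedSizeDemo() {
        }

        public static void main(final String[] args) {
            System.out.println("serialized proxy size: " + PROXY_SIZE + " bytes");
        }
    }
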
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java
new file mode 100644 (file)
index 0000000..0523d08
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Externalizable proxy for {@link Snapshot}.
+ */
+final class SS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private Snapshot snapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SS() {
+        // For Externalizable
+    }
+
+    SS(final Snapshot snapshot) {
+        this.snapshot = requireNonNull(snapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLongs(out, snapshot.getLastIndex(), snapshot.getLastTerm());
+        WritableObjects.writeLongs(out, snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm());
+        WritableObjects.writeLong(out, snapshot.getElectionTerm());
+        out.writeObject(snapshot.getElectionVotedFor());
+        out.writeObject(snapshot.getServerConfiguration());
+
+        final var unAppliedEntries = snapshot.getUnAppliedEntries();
+        out.writeInt(unAppliedEntries.size());
+        for (var e : unAppliedEntries) {
+            WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+            out.writeObject(e.getData());
+        }
+
+        out.writeObject(snapshot.getState());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        byte hdr = WritableObjects.readLongHeader(in);
+        long lastIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastTerm = WritableObjects.readSecondLong(in, hdr);
+
+        hdr = WritableObjects.readLongHeader(in);
+        long lastAppliedIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastAppliedTerm = WritableObjects.readSecondLong(in, hdr);
+        long electionTerm = WritableObjects.readLong(in);
+        String electionVotedFor = (String) in.readObject();
+        ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
+
+        int size = in.readInt();
+        var unAppliedEntries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+        for (int i = 0; i < size; i++) {
+            hdr = WritableObjects.readLongHeader(in);
+            unAppliedEntries.add(new SimpleReplicatedLogEntry(
+                WritableObjects.readFirstLong(in, hdr), WritableObjects.readSecondLong(in, hdr),
+                (Payload) in.readObject()));
+        }
+
+        State state = (State) in.readObject();
+
+        snapshot = Snapshot.create(state, unAppliedEntries.build(), lastIndex, lastTerm, lastAppliedIndex,
+            lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshot);
+    }
+}
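
SS above streams the unapplied entries as a size prefix followed by (index, term, payload) triples, with each index/term pair sharing a single WritableObjects header. A reduced sketch of that length-prefixed encoding; IndexTermListCodec is a hypothetical helper, not part of the patch:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class IndexTermListCodec {
        record IndexTerm(long index, long term) {
        }

        private IndexTermListCodec() {
        }

        static void write(final DataOutput out, final List<IndexTerm> entries) throws IOException {
            out.writeInt(entries.size());                                 // length prefix
            for (var e : entries) {
                WritableObjects.writeLongs(out, e.index(), e.term());     // shared-header pair
            }
        }

        static List<IndexTerm> read(final DataInput in) throws IOException {
            final int size = in.readInt();
            final var entries = new ArrayList<IndexTerm>(size);
            for (int i = 0; i < size; i++) {
                final byte hdr = WritableObjects.readLongHeader(in);
                entries.add(new IndexTerm(
                    WritableObjects.readFirstLong(in, hdr), WritableObjects.readSecondLong(in, hdr)));
            }
            return entries;
        }
    }
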
index 055984229bc2920a5cf1be0d609e2c8d12e0e53d..dbb64f1d82f505c9ea493fe6598ce7f17dbefe77 100644 (file)
@@ -15,12 +15,10 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,8 +27,9 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public final class ServerConfigurationPayload extends Payload implements PersistentPayload, Serializable {
+public final class ServerConfigurationPayload extends Payload implements PersistentPayload {
     private static final class Proxy implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private List<ServerInfo> serverConfig;
@@ -43,35 +42,39 @@ public final class ServerConfigurationPayload extends Payload implements Persist
         }
 
         Proxy(final ServerConfigurationPayload payload) {
-            this.serverConfig = payload.getServerConfig();
+            serverConfig = payload.getServerConfig();
         }
 
         @Override
         public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeInt(serverConfig.size());
-            for (ServerInfo i : serverConfig) {
-                out.writeObject(i.getId());
-                out.writeBoolean(i.isVoting());
+            for (var serverInfo : serverConfig) {
+                out.writeObject(serverInfo.peerId());
+                out.writeBoolean(serverInfo.isVoting());
             }
         }
 
         @Override
         public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             final int size = in.readInt();
-            serverConfig = new ArrayList<>(size);
+
+            final var builder = ImmutableList.<ServerInfo>builderWithExpectedSize(size);
             for (int i = 0; i < size; ++i) {
                 final String id = (String) in.readObject();
                 final boolean voting = in.readBoolean();
-                serverConfig.add(new ServerInfo(id, voting));
+                builder.add(new ServerInfo(id, voting));
             }
+            serverConfig = builder.build();
         }
 
+        @java.io.Serial
         private Object readResolve() {
             return new ServerConfigurationPayload(serverConfig);
         }
     }
 
     private static final Logger LOG = LoggerFactory.getLogger(ServerConfigurationPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -90,6 +93,11 @@ public final class ServerConfigurationPayload extends Payload implements Persist
 
     @Override
     public int size() {
+        return serializedSize();
+    }
+
+    @Override
+    public int serializedSize() {
         if (serializedSize < 0) {
             try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
                 try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
@@ -112,21 +120,9 @@ public final class ServerConfigurationPayload extends Payload implements Persist
     }
 
     @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (obj == null) {
-            return false;
-        }
-
-        if (getClass() != obj.getClass()) {
-            return false;
-        }
-
-        ServerConfigurationPayload other = (ServerConfigurationPayload) obj;
-        return serverConfig.equals(other.serverConfig);
+    public boolean equals(final Object obj) {
+        return this == obj || obj instanceof ServerConfigurationPayload other
+            && serverConfig.equals(other.serverConfig);
     }
 
     @Override
@@ -134,7 +130,8 @@ public final class ServerConfigurationPayload extends Payload implements Persist
         return "ServerConfigurationPayload [serverConfig=" + serverConfig + "]";
     }
 
-    private Object writeReplace() {
+    @Override
+    protected Object writeReplace() {
         return new Proxy(this);
     }
 }
index 6e1ca82f77d80571f74e6d4b49229ed02445ea98..de70e17d3609d381451c9d3903a785620a3dfe5e 100644 (file)
@@ -17,47 +17,8 @@ import org.eclipse.jdt.annotation.NonNull;
  *
  * @author Thomas Pantelis
  */
-public final class ServerInfo {
-    private final String id;
-    private final boolean isVoting;
-
-    public ServerInfo(@NonNull String id, boolean isVoting) {
-        this.id = requireNonNull(id);
-        this.isVoting = isVoting;
-    }
-
-    public @NonNull String getId() {
-        return id;
-    }
-
-    public boolean isVoting() {
-        return isVoting;
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + Boolean.hashCode(isVoting);
-        result = prime * result + id.hashCode();
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof ServerInfo)) {
-            return false;
-        }
-
-        final ServerInfo other = (ServerInfo) obj;
-        return isVoting == other.isVoting && id.equals(other.id);
-    }
-
-    @Override
-    public String toString() {
-        return "ServerInfo [id=" + id + ", isVoting=" + isVoting + "]";
+public record ServerInfo(@NonNull String peerId, boolean isVoting) {
+    public ServerInfo {
+        requireNonNull(peerId);
     }
 }
\ No newline at end of file
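
With ServerInfo now a record, callers switch from the old getId()/isVoting() getters to the generated peerId()/isVoting() accessors (the ServerConfigurationPayload proxy above already does), and equals/hashCode/toString come for free. A brief usage sketch:

    import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;

    public final class ServerInfoUsage {
        public static void main(final String[] args) {
            final var voting = new ServerInfo("member-1", true);
            final var nonVoting = new ServerInfo("member-2", false);

            System.out.println(voting.peerId() + " voting=" + voting.isVoting());
            // Records compare by component values, matching the hand-written equals() they replace.
            System.out.println(voting.equals(new ServerInfo("member-1", true)));   // true
            System.out.println(nonVoting);  // ServerInfo[peerId=member-2, isVoting=false]
        }
    }
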
index 4c07e6b812e254ad026e6d49211e4e22b97f5b6b..610d53a9e72efcb7bded805e79d7d6a4a5e1b63f 100644 (file)
@@ -9,13 +9,10 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static java.util.Objects.requireNonNull;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
+import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * A {@link ReplicatedLogEntry} implementation.
@@ -23,45 +20,10 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa
  * @author Thomas Pantelis
  */
 public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ReplicatedLogEntry replicatedLogEntry;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ReplicatedLogEntry replicatedLogEntry) {
-            this.replicatedLogEntry = replicatedLogEntry;
-        }
-
-        static int estimatedSerializedSize(final ReplicatedLogEntry replicatedLogEntry) {
-            return 8 /* index */ + 8 /* term */ + replicatedLogEntry.getData().size()
-                    + 400 /* estimated extra padding for class info */;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(replicatedLogEntry.getIndex());
-            out.writeLong(replicatedLogEntry.getTerm());
-            out.writeObject(replicatedLogEntry.getData());
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            replicatedLogEntry = new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject());
-        }
-
-        private Object readResolve() {
-            return replicatedLogEntry;
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    // Estimate of how big the proxy is. Note this includes object stream overhead, so it is a bit conservative.
+    private static final int PROXY_SIZE = SerializationUtils.serialize(new LE((Void) null)).length;
 
     private final long index;
     private final long term;
@@ -98,7 +60,12 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
 
     @Override
     public int size() {
-        return getData().size();
+        return payload.size();
+    }
+
+    @Override
+    public int serializedSize() {
+        return PROXY_SIZE + payload.serializedSize();
     }
 
     @Override
@@ -111,14 +78,6 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
         persistencePending = pending;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    public int estimatedSerializedSize() {
-        return Proxy.estimatedSerializedSize(this);
-    }
-
     @Override
     public int hashCode() {
         final int prime = 31;
@@ -131,20 +90,17 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-
-        SimpleReplicatedLogEntry other = (SimpleReplicatedLogEntry) obj;
-        return index == other.index && term == other.term && payload.equals(other.payload);
+        return this == obj || obj instanceof SimpleReplicatedLogEntry other && index == other.index
+            && term == other.term && payload.equals(other.payload);
     }
 
     @Override
     public String toString() {
         return "SimpleReplicatedLogEntry [index=" + index + ", term=" + term + ", payload=" + payload + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new LE(this);
+    }
 }
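
serializedSize() above now adds a fixed, measured envelope size to the payload's own estimate: PROXY_SIZE is taken from an LE built via the (Void) constructor, that is, full-width longs and no payload, replacing the old hand-tuned "+ 400" guess. A hypothetical analogue of that worst-case measurement:

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    final class EnvelopeSizeDemo {
        // Worst-case envelope: both longs at full width, payload intentionally absent,
        // mirroring the LE((Void) null) constructor used above for size estimation.
        private record Envelope(long index, long term, byte[] payload) implements Serializable {
        }

        static final int ENVELOPE_SIZE =
            SerializationUtils.serialize(new Envelope(Long.MIN_VALUE, Long.MIN_VALUE, null)).length;

        private EnvelopeSizeDemo() {
        }

        static int estimateSerializedSize(final int payloadEstimate) {
            return ENVELOPE_SIZE + payloadEstimate;
        }

        public static void main(final String[] args) {
            // e.g. a payload reporting a 128-byte estimate of its own
            System.out.println(estimateSerializedSize(128));
        }
    }
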
index ca6e6dff30156b3e571281c1c85f89ac619a390c..250551a780d0a5842e409f2b59315f8d63b27191 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ExtendedActorSystem;
@@ -45,11 +44,12 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer {
     }
 
     @Override
-    public byte[] toBinary(Object obj) {
-        checkArgument(obj instanceof SimpleReplicatedLogEntry, "Unsupported object type %s", obj.getClass());
+    public byte[] toBinary(final Object obj) {
+        if (!(obj instanceof SimpleReplicatedLogEntry replicatedLogEntry)) {
+            throw new IllegalArgumentException("Unsupported object type " + obj.getClass());
+        }
 
-        SimpleReplicatedLogEntry replicatedLogEntry = (SimpleReplicatedLogEntry)obj;
-        final int estimatedSerializedSize = replicatedLogEntry.estimatedSerializedSize();
+        final int estimatedSerializedSize = replicatedLogEntry.serializedSize();
 
         final ByteArrayOutputStream bos = new ByteArrayOutputStream(estimatedSerializedSize);
         SerializationUtils.serialize(replicatedLogEntry, bos);
@@ -62,7 +62,7 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer {
     }
 
     @Override
-    public Object fromBinaryJava(byte[] bytes, Class<?> manifest) {
+    public Object fromBinaryJava(final byte[] bytes, final Class<?> manifest) {
         try (ClassLoaderObjectInputStream is = new ClassLoaderObjectInputStream(system.dynamicAccess().classLoader(),
                 new ByteArrayInputStream(bytes))) {
             return is.readObject();
index 091009e2bd31e3898af14552874bf6cc17597898..81d2331bb4b6c9af4b700c9be2b1a2a37f8328da 100644 (file)
@@ -7,24 +7,16 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
  * Represents a snapshot of the raft data.
  *
  * @author Thomas Pantelis
  */
-// Not final for mocking
-public class Snapshot implements Serializable {
-
+public final class Snapshot implements Serializable {
     /**
      * Implementations of this interface are used as the state payload for a snapshot.
      *
@@ -42,70 +34,7 @@ public class Snapshot implements Serializable {
         }
     }
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private Snapshot snapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final Snapshot snapshot) {
-            this.snapshot = snapshot;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(snapshot.lastIndex);
-            out.writeLong(snapshot.lastTerm);
-            out.writeLong(snapshot.lastAppliedIndex);
-            out.writeLong(snapshot.lastAppliedTerm);
-            out.writeLong(snapshot.electionTerm);
-            out.writeObject(snapshot.electionVotedFor);
-            out.writeObject(snapshot.serverConfig);
-
-            out.writeInt(snapshot.unAppliedEntries.size());
-            for (ReplicatedLogEntry e: snapshot.unAppliedEntries) {
-                out.writeLong(e.getIndex());
-                out.writeLong(e.getTerm());
-                out.writeObject(e.getData());
-            }
-
-            out.writeObject(snapshot.state);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            long lastIndex = in.readLong();
-            long lastTerm = in.readLong();
-            long lastAppliedIndex = in.readLong();
-            long lastAppliedTerm = in.readLong();
-            long electionTerm = in.readLong();
-            String electionVotedFor = (String) in.readObject();
-            ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
-
-            int size = in.readInt();
-            List<ReplicatedLogEntry> unAppliedEntries = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                unAppliedEntries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(),
-                        (Payload) in.readObject()));
-            }
-
-            State state = (State) in.readObject();
-
-            snapshot = Snapshot.create(state, unAppliedEntries, lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm,
-                    electionTerm, electionVotedFor, serverConfig);
-        }
-
-        private Object readResolve() {
-            return snapshot;
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final State state;
@@ -118,7 +47,7 @@ public class Snapshot implements Serializable {
     private final String electionVotedFor;
     private final ServerConfigurationPayload serverConfig;
 
-    Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
+    private Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
             final long lastTerm, final long lastAppliedIndex, final long lastAppliedTerm, final long electionTerm,
             final String electionVotedFor, final ServerConfigurationPayload serverConfig) {
         this.state = state;
@@ -160,7 +89,7 @@ public class Snapshot implements Serializable {
     }
 
     public long getLastIndex() {
-        return this.lastIndex;
+        return lastIndex;
     }
 
     public long getElectionTerm() {
@@ -175,10 +104,6 @@ public class Snapshot implements Serializable {
         return serverConfig;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "Snapshot [lastIndex=" + lastIndex + ", lastTerm=" + lastTerm + ", lastAppliedIndex=" + lastAppliedIndex
@@ -186,4 +111,9 @@ public class Snapshot implements Serializable {
                 + ", state=" + state + ", electionTerm=" + electionTerm + ", electionVotedFor="
                 + electionVotedFor + ", ServerConfigPayload="  + serverConfig + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new SS(this);
+    }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java
new file mode 100644 (file)
index 0000000..0fc6f6d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link UpdateElectionTerm}.
+ */
+final class UT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private UpdateElectionTerm updateElectionTerm;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public UT() {
+        // For Externalizable
+    }
+
+    UT(final UpdateElectionTerm updateElectionTerm) {
+        this.updateElectionTerm = requireNonNull(updateElectionTerm);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, updateElectionTerm.getCurrentTerm());
+        out.writeObject(updateElectionTerm.getVotedFor());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        updateElectionTerm = new UpdateElectionTerm(WritableObjects.readLong(in), (String) in.readObject());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(updateElectionTerm);
+    }
+}
index 939d893a2ec505538f918a1f30ad5c927c56666a..3ef7acbea369c35f4fa365ac5736aa0b3f34df7f 100644 (file)
@@ -7,48 +7,13 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
  * Message class to persist election term information.
  */
-public class UpdateElectionTerm implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private UpdateElectionTerm updateElectionTerm;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final UpdateElectionTerm updateElectionTerm) {
-            this.updateElectionTerm = updateElectionTerm;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(updateElectionTerm.currentTerm);
-            out.writeObject(updateElectionTerm.votedFor);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            updateElectionTerm = new UpdateElectionTerm(in.readLong(), (String) in.readObject());
-        }
-
-        private Object readResolve() {
-            return updateElectionTerm;
-        }
-    }
-
+public final class UpdateElectionTerm implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long currentTerm;
@@ -67,13 +32,14 @@ public class UpdateElectionTerm implements Serializable {
         return votedFor;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "UpdateElectionTerm [currentTerm=" + currentTerm + ", votedFor=" + votedFor + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new UT(this);
+    }
 }
 
index d8d0ce57721d0c5e11876910e1f721c493628a4f..f59598876de66a975c06766577f39ad3371aa62c 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
-import static akka.pattern.Patterns.ask;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -17,16 +16,15 @@ import akka.actor.PoisonPill;
 import akka.actor.Terminated;
 import akka.dispatch.Dispatchers;
 import akka.dispatch.Mailboxes;
+import akka.pattern.Patterns;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.OutputStream;
 import java.time.Duration;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -44,10 +42,10 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
@@ -126,7 +124,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
 
         TestRaftActor(final Builder builder) {
             super(builder);
-            this.collectorActor = builder.collectorActor;
+            collectorActor = builder.collectorActor;
         }
 
         public void startDropMessages(final Class<?> msgClass) {
@@ -148,26 +146,23 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         @SuppressWarnings({ "rawtypes", "unchecked", "checkstyle:IllegalCatch" })
         @Override
         public void handleCommand(final Object message) {
-            if (message instanceof MockPayload) {
-                MockPayload payload = (MockPayload) message;
+            if (message instanceof MockPayload payload) {
                 super.persistData(collectorActor, new MockIdentifier(payload.toString()), payload, false);
                 return;
             }
 
-            if (message instanceof ServerConfigurationPayload) {
-                super.persistData(collectorActor, new MockIdentifier("serverConfig"), (Payload) message, false);
+            if (message instanceof ServerConfigurationPayload payload) {
+                super.persistData(collectorActor, new MockIdentifier("serverConfig"), payload, false);
                 return;
             }
 
-            if (message instanceof SetPeerAddress) {
-                setPeerAddress(((SetPeerAddress) message).getPeerId(),
-                        ((SetPeerAddress) message).getPeerAddress());
+            if (message instanceof SetPeerAddress setPeerAddress) {
+                setPeerAddress(setPeerAddress.getPeerId(), setPeerAddress.getPeerAddress());
                 return;
             }
 
-            if (message instanceof TestPersist) {
-                persistData(((TestPersist) message).getActorRef(), ((TestPersist) message).getIdentifier(),
-                        ((TestPersist) message).getPayload(), false);
+            if (message instanceof TestPersist testPersist) {
+                persistData(testPersist.getActorRef(), testPersist.getIdentifier(), testPersist.getPayload(), false);
                 return;
             }
 
@@ -190,9 +185,9 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
-            MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+            MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+                SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
             }
 
             actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
@@ -214,13 +209,14 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
             }
 
             public Builder collectorActor(final ActorRef newCollectorActor) {
-                this.collectorActor = newCollectorActor;
+                collectorActor = newCollectorActor;
                 return this;
             }
         }
     }
 
-    protected static final int SNAPSHOT_CHUNK_SIZE = 100;
+    // FIXME: this is an arbitrary limit. Document the interactions and/or improve them for maintainability.
+    protected static final int MAXIMUM_MESSAGE_SLICE_SIZE = 700;
 
     protected final Logger testLog = LoggerFactory.getLogger(getClass());
 
@@ -242,16 +238,16 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
     protected String follower2Id = factory.generateActorId("follower");
     protected TestActorRef<TestRaftActor> follower2Actor;
     protected ActorRef follower2CollectorActor;
-    protected  RaftActorBehavior follower2;
+    protected RaftActorBehavior follower2;
     protected RaftActorContext follower2Context;
 
-    protected ImmutableMap<String, String> peerAddresses;
+    protected Map<String, String> peerAddresses;
 
     protected long initialTerm = 5;
     protected long currentTerm;
 
     protected int snapshotBatchCount = 4;
-    protected int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+    protected int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
 
     protected List<MockPayload> expSnapshotState = new ArrayList<>();
 
@@ -269,7 +265,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         configParams.setSnapshotBatchCount(snapshotBatchCount);
         configParams.setSnapshotDataThresholdPercentage(70);
         configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
-        configParams.setSnapshotChunkSize(snapshotChunkSize);
+        configParams.setMaximumMessageSliceSize(maximumMessageSliceSize);
         return configParams;
     }
 
@@ -287,7 +283,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
     protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final Map<String, String> newPeerAddresses,
             final ConfigParams configParams) {
         return newTestRaftActor(id, TestRaftActor.newBuilder().peerAddresses(newPeerAddresses != null
-                ? newPeerAddresses : Collections.<String, String>emptyMap()).config(configParams));
+                ? newPeerAddresses : Map.of()).config(configParams));
     }
 
     protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final TestRaftActor.Builder builder) {
@@ -423,7 +419,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             try {
-                OnDemandRaftState raftState = (OnDemandRaftState)Await.result(ask(raftActor,
+                OnDemandRaftState raftState = (OnDemandRaftState)Await.result(Patterns.ask(raftActor,
                         GetOnDemandRaftState.INSTANCE, timeout), timeout.duration());
                 verifier.accept(raftState);
                 return;
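
The retry loop above resolves GetOnDemandRaftState through the classic Akka ask pattern (Patterns.ask plus Await.result). A minimal standalone sketch of that pattern follows, assuming an arbitrary actor reference, message and five-second timeout rather than the project's own helpers:

import akka.actor.ActorRef;
import akka.pattern.Patterns;
import akka.util.Timeout;
import java.util.concurrent.TimeUnit;
import scala.concurrent.Await;

final class AskExample {
    // Blocks for the reply; acceptable in tests, avoid inside production actor code.
    static Object askSync(final ActorRef actor, final Object message) throws Exception {
        final Timeout timeout = new Timeout(5, TimeUnit.SECONDS);
        return Await.result(Patterns.ask(actor, message, timeout), timeout.duration());
    }
}
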
index 983b26da9c15353eb6a440418c938a3c3413fb18..65ac83d0d00c17d6c8a7e47136a2e95772a754bb 100644 (file)
@@ -145,25 +145,32 @@ public class AbstractReplicatedLogImplTest {
         from = replicatedLogImpl.getFrom(0, 20, ReplicatedLog.NO_MAX_SIZE);
         assertEquals(4, from.size());
         assertEquals("A", from.get(0).getData().toString());
+        assertEquals("B", from.get(1).getData().toString());
+        assertEquals("C", from.get(2).getData().toString());
         assertEquals("D", from.get(3).getData().toString());
 
+        // Pre-calculate sizing information for use with capping
+        final int sizeB = from.get(1).serializedSize();
+        final int sizeC = from.get(2).serializedSize();
+        final int sizeD = from.get(3).serializedSize();
+
         from = replicatedLogImpl.getFrom(1, 2, ReplicatedLog.NO_MAX_SIZE);
         assertEquals(2, from.size());
         assertEquals("B", from.get(0).getData().toString());
         assertEquals("C", from.get(1).getData().toString());
 
-        from = replicatedLogImpl.getFrom(1, 3, 2);
+        from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC);
         assertEquals(2, from.size());
         assertEquals("B", from.get(0).getData().toString());
         assertEquals("C", from.get(1).getData().toString());
 
-        from = replicatedLogImpl.getFrom(1, 3, 3);
+        from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC + sizeD);
         assertEquals(3, from.size());
         assertEquals("B", from.get(0).getData().toString());
         assertEquals("C", from.get(1).getData().toString());
         assertEquals("D", from.get(2).getData().toString());
 
-        from = replicatedLogImpl.getFrom(1, 2, 3);
+        from = replicatedLogImpl.getFrom(1, 2, sizeB + sizeC + sizeD);
         assertEquals(2, from.size());
         assertEquals("B", from.get(0).getData().toString());
         assertEquals("C", from.get(1).getData().toString());
index e99215ddbaa8cee74d457b2ef1f55e098653a8f4..a565932a02a5da9fae22b55a27d2987350ca4ec1 100644 (file)
@@ -18,11 +18,10 @@ import akka.actor.Status;
 import akka.pattern.Patterns;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
@@ -143,9 +142,9 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat
     private void createRaftActors() {
         testLog.info("createRaftActors starting");
 
-        final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), -1, -1, -1, -1,
+        final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(), -1, -1, -1, -1,
                 1, null, new org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload(
-                        Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
+                        List.of(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
                                 new ServerInfo(follower2Id, true), new ServerInfo(follower3Id, false))));
 
         InMemorySnapshotStore.addSnapshot(leaderId, snapshot);
@@ -156,28 +155,28 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat
         follower1NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower1Id + "-notifier"));
         follower1Actor = newTestRaftActor(follower1Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
+                Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
                         follower3Id, testActorPath(follower3Id)))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower1NotifierActor));
 
         follower2NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower2Id + "-notifier"));
         follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
                         follower3Id, testActorPath(follower3Id)))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower2NotifierActor));
 
         follower3NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower3Id + "-notifier"));
         follower3Actor = newTestRaftActor(follower3Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
                         follower2Id, follower2Actor.path().toString()))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower3NotifierActor));
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString())
-                .put(follower3Id, follower3Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString(),
+                follower3Id, follower3Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderConfigParams.setElectionTimeoutFactor(3);
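
The Guava ImmutableMap usages above are replaced with the JDK's Map.of factories. A minimal illustration with hypothetical peer addresses; like ImmutableMap, the returned map is unmodifiable and rejects null keys, null values and duplicate keys:

import java.util.Map;

final class PeerAddressExample {
    static Map<String, String> peers(final String follower1Path, final String follower2Path) {
        // Unmodifiable map; Map.of supports up to ten key/value pairs directly.
        return Map.of("follower1", follower1Path, "follower2", follower2Path);
    }
}
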
index 4b241f95768882d498765bfc6b35a367598785b6..83aebc37c316729261a321d3329738e22547c9f9 100644 (file)
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -28,8 +29,8 @@ import java.util.function.Function;
 import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 
 public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
@@ -52,14 +53,14 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
         super(builder.id, builder.peerAddresses != null ? builder.peerAddresses :
             Collections.emptyMap(), Optional.ofNullable(builder.config), PAYLOAD_VERSION);
         state = Collections.synchronizedList(new ArrayList<>());
-        this.actorDelegate = mock(RaftActor.class);
-        this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+        actorDelegate = mock(RaftActor.class);
+        recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
 
-        this.snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
+        snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
             mock(RaftActorSnapshotCohort.class);
 
         if (builder.dataPersistenceProvider == null) {
-            setPersistence(builder.persistent.isPresent() ? builder.persistent.get() : true);
+            setPersistence(builder.persistent.isPresent() ? builder.persistent.orElseThrow() : true);
         } else {
             setPersistence(builder.dataPersistenceProvider);
         }
@@ -174,9 +175,9 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     private void applySnapshotState(final Snapshot.State newState) {
-        if (newState instanceof MockSnapshotState) {
+        if (newState instanceof MockSnapshotState mockState) {
             state.clear();
-            state.addAll(((MockSnapshotState)newState).getState());
+            state.addAll(mockState.getState());
         }
     }
 
@@ -213,7 +214,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     @Override public String persistenceId() {
-        return this.getId();
+        return getId();
     }
 
     protected void newBehavior(final RaftActorBehavior newBehavior) {
@@ -243,15 +244,15 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     public static List<Object> fromState(final Snapshot.State from) {
-        if (from instanceof MockSnapshotState) {
-            return ((MockSnapshotState)from).getState();
+        if (from instanceof MockSnapshotState mockState) {
+            return mockState.getState();
         }
 
         throw new IllegalStateException("Unexpected snapshot State: " + from);
     }
 
     public ReplicatedLog getReplicatedLog() {
-        return this.getRaftActorContext().getReplicatedLog();
+        return getRaftActorContext().getReplicatedLog();
     }
 
     @Override
@@ -296,52 +297,52 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
         }
 
         public T id(final String newId) {
-            this.id = newId;
+            id = newId;
             return self();
         }
 
         public T peerAddresses(final Map<String, String> newPeerAddresses) {
-            this.peerAddresses = newPeerAddresses;
+            peerAddresses = newPeerAddresses;
             return self();
         }
 
         public T config(final ConfigParams newConfig) {
-            this.config = newConfig;
+            config = newConfig;
             return self();
         }
 
         public T dataPersistenceProvider(final DataPersistenceProvider newDataPersistenceProvider) {
-            this.dataPersistenceProvider = newDataPersistenceProvider;
+            dataPersistenceProvider = newDataPersistenceProvider;
             return self();
         }
 
         public T roleChangeNotifier(final ActorRef newRoleChangeNotifier) {
-            this.roleChangeNotifier = newRoleChangeNotifier;
+            roleChangeNotifier = newRoleChangeNotifier;
             return self();
         }
 
         public T snapshotMessageSupport(final RaftActorSnapshotMessageSupport newSnapshotMessageSupport) {
-            this.snapshotMessageSupport = newSnapshotMessageSupport;
+            snapshotMessageSupport = newSnapshotMessageSupport;
             return self();
         }
 
         public T restoreFromSnapshot(final Snapshot newRestoreFromSnapshot) {
-            this.restoreFromSnapshot = newRestoreFromSnapshot;
+            restoreFromSnapshot = newRestoreFromSnapshot;
             return self();
         }
 
         public T persistent(final Optional<Boolean> newPersistent) {
-            this.persistent = newPersistent;
+            persistent = newPersistent;
             return self();
         }
 
         public T pauseLeaderFunction(final Function<Runnable, Void> newPauseLeaderFunction) {
-            this.pauseLeaderFunction = newPauseLeaderFunction;
+            pauseLeaderFunction = newPauseLeaderFunction;
             return self();
         }
 
         public T snapshotCohort(final RaftActorSnapshotCohort newSnapshotCohort) {
-            this.snapshotCohort = newSnapshotCohort;
+            snapshotCohort = newSnapshotCohort;
             return self();
         }
 
@@ -371,10 +372,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
 
         @Override
         public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + (state == null ? 0 : state.hashCode());
-            return result;
+            return Objects.hash(state);
         }
 
         @Override
@@ -389,11 +387,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
                 return false;
             }
             MockSnapshotState other = (MockSnapshotState) obj;
-            if (state == null) {
-                if (other.state != null) {
-                    return false;
-                }
-            } else if (!state.equals(other.state)) {
+            if (!Objects.equals(state, other.state)) {
                 return false;
             }
             return true;
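
The hand-rolled hashCode()/equals() bodies above are collapsed onto java.util.Objects, matching the pattern-matching instanceof style used elsewhere in this change. A minimal sketch with a hypothetical Holder class:

import java.util.List;
import java.util.Objects;

final class Holder {
    private final List<Object> state;

    Holder(final List<Object> state) {
        this.state = state;
    }

    @Override
    public int hashCode() {
        return Objects.hash(state);
    }

    @Override
    public boolean equals(final Object obj) {
        // Pattern-matching instanceof removes the explicit null check and cast.
        return this == obj || obj instanceof Holder other && Objects.equals(state, other.state);
    }
}
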
index 8c17e1e8e8e9d4060221f8c68b2a980f3f6b1b32..6d4ec22e3d6be213f9a5f09d9b62042b9649d527 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.controller.cluster.raft;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
@@ -19,16 +21,17 @@ import java.io.OutputStream;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.NonPersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ByteState;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,8 +59,8 @@ public class MockRaftActorContext extends RaftActorContextImpl {
 
             @Override
             public void update(final long newTerm, final String newVotedFor) {
-                this.currentTerm = newTerm;
-                this.votedFor = newVotedFor;
+                currentTerm = newTerm;
+                votedFor = newVotedFor;
 
                 // TODO : Write to some persistent state
             }
@@ -109,7 +112,7 @@ public class MockRaftActorContext extends RaftActorContextImpl {
     }
 
     @Override public ActorSystem getActorSystem() {
-        return this.system;
+        return system;
     }
 
     @Override public ActorSelection getPeerActorSelection(final String peerId) {
@@ -200,21 +203,22 @@ public class MockRaftActorContext extends RaftActorContextImpl {
         }
     }
 
-    public static class MockPayload extends Payload implements Serializable {
+    public static final class MockPayload extends Payload {
         private static final long serialVersionUID = 3121380393130864247L;
-        private String value = "";
-        private int size;
+
+        private final String data;
+        private final int size;
 
         public MockPayload() {
+            this("");
         }
 
         public MockPayload(final String data) {
-            this.value = data;
-            size = value.length();
+            this(data, data.length());
         }
 
         public MockPayload(final String data, final int size) {
-            this(data);
+            this.data = requireNonNull(data);
             this.size = size;
         }
 
@@ -223,39 +227,46 @@ public class MockRaftActorContext extends RaftActorContextImpl {
             return size;
         }
 
+        @Override
+        public int serializedSize() {
+            return size;
+        }
+
         @Override
         public String toString() {
-            return value;
+            return data;
         }
 
         @Override
         public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + (value == null ? 0 : value.hashCode());
-            return result;
+            return data.hashCode();
         }
 
         @Override
         public boolean equals(final Object obj) {
-            if (this == obj) {
-                return true;
-            }
-            if (obj == null) {
-                return false;
-            }
-            if (getClass() != obj.getClass()) {
-                return false;
-            }
-            MockPayload other = (MockPayload) obj;
-            if (value == null) {
-                if (other.value != null) {
-                    return false;
-                }
-            } else if (!value.equals(other.value)) {
-                return false;
-            }
-            return true;
+            return this == obj || obj instanceof MockPayload other && Objects.equals(data, other.data)
+                && size == other.size;
+        }
+
+        @Override
+        protected Object writeReplace() {
+            return new MockPayloadProxy(data, size);
+        }
+    }
+
+    private static final class MockPayloadProxy implements Serializable {
+        private static final long serialVersionUID = 1L;
+
+        private final String value;
+        private final int size;
+
+        MockPayloadProxy(String value, int size) {
+            this.value = value;
+            this.size = size;
+        }
+
+        Object readResolve() {
+            return new MockPayload(value, size);
         }
     }
 
@@ -264,19 +275,19 @@ public class MockRaftActorContext extends RaftActorContextImpl {
 
         public MockReplicatedLogBuilder createEntries(final int start, final int end, final int term) {
             for (int i = start; i < end; i++) {
-                this.mockLog.append(new SimpleReplicatedLogEntry(i, term,
+                mockLog.append(new SimpleReplicatedLogEntry(i, term,
                         new MockRaftActorContext.MockPayload(Integer.toString(i))));
             }
             return this;
         }
 
         public MockReplicatedLogBuilder addEntry(final int index, final int term, final MockPayload payload) {
-            this.mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
+            mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
             return this;
         }
 
         public ReplicatedLog build() {
-            return this.mockLog;
+            return mockLog;
         }
     }
 
index 9f930fd71ad827a2871dc2d4fc150ee1d4aac0a8..f875f891b148a64a1e322425d8f682acfc367f3b 100644 (file)
@@ -10,10 +10,10 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 
 import akka.actor.ActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
@@ -291,7 +291,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
         //
         // We also add another voting follower actor into the mix even though it shouldn't affect the
         // outcome.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false),
                 new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm,
@@ -305,13 +305,13 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
         DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams();
         follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
         follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
                     .config(follower2ConfigParams).persistent(Optional.of(false)));
         TestRaftActor follower2Instance = follower2Actor.underlyingActor();
         follower2Instance.waitForRecoveryComplete();
         follower2CollectorActor = follower2Instance.collectorActor();
 
-        peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(),
+        peerAddresses = Map.of(follower1Id, follower1Actor.path().toString(),
                 follower2Id, follower2Actor.path().toString());
 
         createNewLeaderActor();
@@ -399,7 +399,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         // Set up a persisted ServerConfigurationPayload with the leader voting and the follower non-voting.
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, persistedTerm,
                 persistedServerConfig);
@@ -411,11 +411,10 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         follower1Actor = newTestRaftActor(follower1Id, follower1Builder.peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
+                Map.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
                     .persistent(Optional.of(false)));
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString()).build();
+        peerAddresses = Map.of(follower1Id, follower1Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses)
@@ -436,16 +435,16 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         currentTerm = persistedTerm + 1;
         assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
-        assertEquals("Leader server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
-                Sets.newHashSet(leaderContext.getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Leader server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+                Set.copyOf(leaderContext.getPeerServerInfo(true).getServerConfig()));
         assertEquals("Leader isVotingMember", true, leaderContext.isVotingMember());
 
         // Verify follower's context after startup
 
         MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
         assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
-        assertEquals("Follower server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
-                Sets.newHashSet(follower1Context.getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Follower server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+                Set.copyOf(follower1Context.getPeerServerInfo(true).getServerConfig()));
         assertEquals("FollowerisVotingMember", false, follower1Context.isVotingMember());
     }
 }
index 815b8d9d4869c4fbcd4806274cf1ee49c86ac5c1..fabfc6c280468792da400318a8fe42b8c72c00d5 100644 (file)
@@ -19,10 +19,9 @@ import static org.mockito.Mockito.verify;
 
 import akka.actor.Props;
 import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import org.junit.After;
 import org.junit.Test;
@@ -84,7 +83,7 @@ public class RaftActorContextImplTest extends AbstractActorTest {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
                 "test", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
-                new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")), configParams,
+                Map.of("peer1", "peerAddress1"), configParams,
                 createProvider(), applyState -> { }, LOG,  MoreExecutors.directExecutor());
 
         context.setPeerAddress("peer1", "peerAddress1_1");
@@ -98,24 +97,24 @@ public class RaftActorContextImplTest extends AbstractActorTest {
     public void testUpdatePeerIds() {
         RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
                 "self", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
-                new HashMap<>(ImmutableMap.of("peer1", "peerAddress1")),
+                Map.of("peer1", "peerAddress1"),
                 new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG,
                 MoreExecutors.directExecutor());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", false),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", false),
                 new ServerInfo("peer2", true), new ServerInfo("peer3", false))));
         verifyPeerInfo(context, "peer1", null);
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", false);
         assertEquals("isVotingMember", false, context.isVotingMember());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", true),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", true),
                 new ServerInfo("peer2", true), new ServerInfo("peer3", true))));
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", true);
         assertEquals("isVotingMember", true, context.isVotingMember());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("peer2", true),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("peer2", true),
                 new ServerInfo("peer3", true))));
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", true);
@@ -130,7 +129,7 @@ public class RaftActorContextImplTest extends AbstractActorTest {
         PeerInfo peerInfo = context.getPeerInfo(peerId);
         if (voting != null) {
             assertNotNull("Expected peer " + peerId, peerInfo);
-            assertEquals("getVotingState for " + peerId, voting.booleanValue()
+            assertEquals("getVotingState for " + peerId, voting
                     ? VotingState.VOTING : VotingState.NON_VOTING, peerInfo.getVotingState());
         } else {
             assertNull("Unexpected peer " + peerId, peerInfo);
index 7ef3c3237b62c92cce238870f812a156066ef8d6..22369d78870b64b2d1a3ebce71bdac2b795a5aaf 100644 (file)
@@ -21,8 +21,8 @@ import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 
 /**
  * Unit tests for RaftActorDelegatingPersistentDataProvider.
@@ -98,12 +98,28 @@ public class RaftActorDelegatingPersistentDataProviderTest {
     }
 
     static class TestNonPersistentPayload extends Payload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
         @Override
         public int size() {
             return 0;
         }
+
+        @Override
+        public int serializedSize() {
+            return 0;
+        }
+
+        @Override
+        protected Object writeReplace() {
+            // Not needed
+            throw new UnsupportedOperationException();
+        }
     }
 
     static class TestPersistentPayload extends TestNonPersistentPayload implements PersistentPayload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
     }
 }
index 3e66c708dd25dc411979b0d238650624199b59b0..cceea83740116a00968cf6d864213edf0e945acc 100644 (file)
@@ -53,6 +53,7 @@ import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.MockRaftActor.MockSnapshotState;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
@@ -60,7 +61,6 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
index 140735b646e92e00f3d795bf02ff94a390becb96..884b16c11e8aceed093dfa2f063e7c5499c9a854 100644 (file)
@@ -22,20 +22,14 @@ import akka.dispatch.Dispatchers;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.io.OutputStream;
 import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
@@ -157,7 +151,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         followerActorContext.setCurrentBehavior(follower);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         followerActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -178,7 +172,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -204,10 +198,9 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in both followers
 
-        assertEquals("Follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
+        assertEquals("Follower peers", Set.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
 
-        assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, FOLLOWER_ID),
-                newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID, FOLLOWER_ID), newFollowerActorContext.getPeerIds());
 
         assertEquals("Follower commit index", 3, followerActorContext.getCommitIndex());
         assertEquals("Follower last applied index", 3, followerActorContext.getLastApplied());
@@ -239,8 +232,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 0, 2, 1).build());
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -258,7 +251,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -278,7 +271,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in the new follower
 
-        assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
 
         LOG.info("testAddServerWithNoExistingFollower ending");
     }
@@ -291,8 +284,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -304,7 +297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -325,7 +318,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in the new follower
 
-        assertEquals("New follower peers", ImmutableSet.of(LEADER_ID), newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
 
         assertNoneMatching(newFollowerCollectorActor, InstallSnapshot.class, 500);
 
@@ -361,8 +354,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -410,8 +403,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // Verify ServerConfigurationPayload entry in the new follower
 
         expectMatching(newFollowerCollectorActor, ApplyState.class, 2);
-        assertEquals("New follower peers", ImmutableSet.of(LEADER_ID, NEW_SERVER_ID2),
-               newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID, NEW_SERVER_ID2), newFollowerActorContext.getPeerIds());
 
         LOG.info("testAddServerWithOperationInProgress ending");
     }
@@ -424,8 +416,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -447,7 +439,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         expectFirstMatching(newFollowerCollectorActor, ApplySnapshot.class);
 
@@ -471,8 +463,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -503,8 +495,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -552,8 +544,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -597,8 +589,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -631,7 +623,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -653,8 +645,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -688,7 +680,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // The first AddServer should succeed with OK even though consensus wasn't reached
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
         verifyServerConfigurationPayloadEntry(leaderActorContext.getReplicatedLog(), votingServer(LEADER_ID),
@@ -713,7 +705,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -737,14 +729,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
 
         TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
                         leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(FOLLOWER_ID));
         followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
 
-        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
-                -1, -1, (short)0), leaderActor);
+        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
 
         followerRaftActor.tell(new AddServer(NEW_SERVER_ID, newFollowerRaftActor.path().toString(), true),
                 testKit.getRef());
@@ -760,7 +751,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
         TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -769,7 +760,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 noLeaderActor.underlyingActor());
 
         ReplicatedLogEntry serverConfigEntry = new SimpleReplicatedLogEntry(1, 1,
-                new ServerConfigurationPayload(Collections.<ServerInfo>emptyList()));
+                new ServerConfigurationPayload(List.of()));
         boolean handled = support.handleMessage(new ApplyState(null, null, serverConfigEntry), ActorRef.noSender());
         assertEquals("Message handled", true, handled);
 
@@ -789,7 +780,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> leaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -809,7 +800,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -831,14 +822,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
 
         TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
                         leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(FOLLOWER_ID));
         followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
 
-        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
-                -1, -1, (short)0), leaderActor);
+        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
 
         followerRaftActor.tell(new RemoveServer(FOLLOWER_ID), testKit.getRef());
         expectFirstMatching(leaderActor, RemoveServer.class);
@@ -862,7 +852,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         final String downNodeId = "downNode";
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(MockLeaderRaftActor.props(
-                ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
+                Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -871,14 +861,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1Actor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                         follower1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2Actor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                         follower2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
@@ -922,7 +912,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActorPath),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActorPath),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -931,7 +921,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final ActorRef followerCollector =
                 actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString()),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString()),
                         configParams, NO_PERSISTENCE, followerCollector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 followerActorId);
@@ -955,7 +945,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         LOG.info("testRemoveServerLeaderWithNoFollowers starting");
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(Collections.<String, String>emptyMap(),
+                MockLeaderRaftActor.props(Map.of(),
                         new MockRaftActorContext()).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -980,7 +970,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
                         FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
         ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
@@ -988,20 +978,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
         // Send first ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
                 testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
@@ -1027,7 +1017,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Send second ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, true)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, true)), testKit.getRef());
         reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
 
@@ -1059,7 +1049,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
                         FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
         ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
@@ -1067,20 +1057,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
         // Send ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
 
@@ -1109,10 +1099,10 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         LOG.info("testChangeLeaderToNonVotingInSingleNode starting");
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(), new MockRaftActorContext())
+                MockLeaderRaftActor.props(Map.of(), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.INVALID_REQUEST, reply.getStatus());
 
@@ -1134,7 +1124,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // via the server config. The server config will also contain 2 voting peers that are down (ie no
         // actors created).
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, false),
                 new ServerInfo("downNode1", true), new ServerInfo("downNode2", true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
@@ -1149,14 +1139,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1183,7 +1173,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // First send the message such that node1 has no peer address for node2 - should fail.
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
                 node2ID, true, "downNode1", false, "downNode2", false));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
@@ -1194,7 +1184,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         long term = node1RaftActor.getRaftActorContext().getTermInformation().getCurrentTerm();
         node1RaftActorRef.tell(new AppendEntries(term, "downNode1", -1L, -1L,
-                Collections.<ReplicatedLogEntry>emptyList(), 0, -1, (short)1), ActorRef.noSender());
+                List.of(), 0, -1, (short)1), ActorRef.noSender());
 
         // Wait for the ElectionTimeout to clear the leaderId. The leaderId must be null so on the next
         // ChangeServersVotingStatus message, it will try to elect a leader.
@@ -1241,7 +1231,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
                         ? actorFactory.createTestActorPath(node2ID) : null;
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1257,7 +1247,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams1,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams1,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
@@ -1267,7 +1257,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams2,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams2,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1279,13 +1269,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         node2RaftActor.setDropMessageOfType(RequestVote.class);
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true));
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.NO_LEADER, reply.getStatus());
 
-        assertEquals("Server config", ImmutableSet.of(nonVotingServer(node1ID), votingServer(node2ID)),
-            new HashSet<>(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Server config", Set.of(nonVotingServer(node1ID), votingServer(node2ID)),
+            Set.copyOf(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
         assertEquals("getRaftState", RaftState.Follower, node1RaftActor.getRaftState());
 
         LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout ending");
@@ -1307,7 +1297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setElectionTimeoutFactor(3);
         configParams.setPeerAddressResolver(peerAddressResolver);
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1322,14 +1312,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         final CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1339,7 +1329,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // forward the request to node2.
 
         ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(
-                ImmutableMap.of(node1ID, true, node2ID, true));
+                Map.of(node1ID, true, node2ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
@@ -1373,7 +1363,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
                         ? actorFactory.createTestActorPath(node2ID) : null);
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1385,14 +1375,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1403,7 +1393,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         node2RaftActor.setDropMessageOfType(RequestVote.class);
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
                 node2ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
 
@@ -1464,7 +1454,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ReplicatedLogEntry logEntry = log.get(log.lastIndex());
         assertEquals("Last log entry payload class", ServerConfigurationPayload.class, logEntry.getData().getClass());
         ServerConfigurationPayload payload = (ServerConfigurationPayload)logEntry.getData();
-        assertEquals("Server config", ImmutableSet.copyOf(expected), new HashSet<>(payload.getServerConfig()));
+        assertEquals("Server config", Set.of(expected), Set.copyOf(payload.getServerConfig()));
     }
 
     private static RaftActorContextImpl newFollowerContext(final String id,
@@ -1476,7 +1466,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ElectionTermImpl termInfo = new ElectionTermImpl(noPersistence, id, LOG);
         termInfo.update(1, LEADER_ID);
         return new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
-                id, termInfo, -1, -1, ImmutableMap.of(LEADER_ID, ""), configParams,
+                id, termInfo, -1, -1, Map.of(LEADER_ID, ""), configParams,
                 noPersistence, applyState -> actor.tell(applyState, actor), LOG,  MoreExecutors.directExecutor());
     }
 
@@ -1486,7 +1476,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AbstractMockRaftActor(final String id, final Map<String, String> peerAddresses,
                 final Optional<ConfigParams> config, final boolean persistent, final ActorRef collectorActor) {
-            super(builder().id(id).peerAddresses(peerAddresses).config(config.get())
+            super(builder().id(id).peerAddresses(peerAddresses).config(config.orElseThrow())
                     .persistent(Optional.of(persistent)));
             this.collectorActor = collectorActor;
         }
@@ -1573,9 +1563,9 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
-            MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+            MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+                SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
             }
 
             actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
@@ -1591,7 +1581,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
     public static class MockNewFollowerRaftActor extends AbstractMockRaftActor {
         public MockNewFollowerRaftActor(final ConfigParams config, final ActorRef collectorActor) {
-            super(NEW_SERVER_ID, new HashMap<>(), Optional.of(config), NO_PERSISTENCE, collectorActor);
+            super(NEW_SERVER_ID, Map.of(), Optional.of(config), NO_PERSISTENCE, collectorActor);
             setPersistence(false);
         }
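
The hunks above replace Guava's ImmutableMap/ImmutableSet builders in RaftActorServerConfigurationSupportTest with the JDK 9+ collection factories (Map.of, Set.of, List.of). Both Map.of and ImmutableMap.of reject null keys/values and duplicate keys; the visible difference is iteration order, which Map.of leaves unspecified while ImmutableMap preserves insertion order. The tests only look peers up by key and compare server configs via Set equality, so the swap does not change behaviour. A minimal, self-contained sketch of those properties (the actor paths are invented for illustration):

    import java.util.Map;

    public class PeerAddressSketch {
        public static void main(String[] args) {
            // Unmodifiable, null-hostile map; behaves like ImmutableMap.of() except for iteration order.
            Map<String, String> peers = Map.of(
                "follower1", "akka://test/user/follower1",   // hypothetical actor paths
                "follower2", "akka://test/user/follower2");

            System.out.println(peers.get("follower1"));       // keyed lookup: order never matters
            try {
                peers.put("follower3", "x");                  // any mutation fails
            } catch (UnsupportedOperationException expected) {
                System.out.println("immutable, as with ImmutableMap");
            }
            // Map.of("id", null) would throw NullPointerException, matching Guava's behaviour.
        }
    }
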
 
index 96e04df15a803855a1b8dca4d71c20e29de4e613..fde56a9a21e1145895282d0a97fe8dd736e4807e 100644
@@ -23,6 +23,7 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
@@ -37,15 +38,10 @@ import akka.persistence.SnapshotOffer;
 import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectOutputStream;
 import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -141,22 +137,20 @@ public class RaftActorTest extends AbstractActorTest {
         // log entry.
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
-        ImmutableMap<String, String> peerAddresses = ImmutableMap.<String, String>builder()
-                .put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
         ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
                 peerAddresses, config), persistenceId);
 
         kit.watch(followerActor);
 
-        List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
-        ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"));
-        snapshotUnappliedEntries.add(entry1);
+        List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+            new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
 
         int lastAppliedDuringSnapshotCapture = 3;
         int lastIndexDuringSnapshotCapture = 4;
 
         // 4 messages as part of snapshot, which are applied to state
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("A"),
                 new MockRaftActorContext.MockPayload("B"),
                 new MockRaftActorContext.MockPayload("C"),
@@ -167,13 +161,9 @@ public class RaftActorTest extends AbstractActorTest {
         InMemorySnapshotStore.addSnapshot(persistenceId, snapshot);
 
         // add more entries after snapshot is taken
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
         ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(5, 1, new MockRaftActorContext.MockPayload("F", 2));
         ReplicatedLogEntry entry3 = new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("G", 3));
         ReplicatedLogEntry entry4 = new SimpleReplicatedLogEntry(7, 1, new MockRaftActorContext.MockPayload("H", 4));
-        entries.add(entry2);
-        entries.add(entry3);
-        entries.add(entry4);
 
         final int lastAppliedToState = 5;
         final int lastIndex = 7;
@@ -199,7 +189,7 @@ public class RaftActorTest extends AbstractActorTest {
         mockRaftActor.waitForRecoveryComplete();
 
         RaftActorContext context = mockRaftActor.getRaftActorContext();
-        assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+        assertEquals("Journal log size", snapshotUnappliedEntries.size() + 3,
                 context.getReplicatedLog().size());
         assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
         assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
@@ -223,8 +213,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(),
-                config, createProvider()), persistenceId);
+                Map.of("member1", "address"), config, createProvider()), persistenceId);
 
         MockRaftActor mockRaftActor = ref.underlyingActor();
 
@@ -246,8 +235,7 @@ public class RaftActorTest extends AbstractActorTest {
         InMemoryJournal.addWriteMessagesCompleteLatch(persistenceId, 1);
 
         TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(),
-                config, createProvider())
+                Map.of("member1", "address"), config, createProvider())
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
 
         InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
@@ -258,8 +246,7 @@ public class RaftActorTest extends AbstractActorTest {
         factory.killActor(ref, kit);
 
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-        ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(), config,
+        ref = factory.createTestActor(MockRaftActor.props(persistenceId, Map.of("member1", "address"), config,
                 createProvider()).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 factory.generateActorId("follower-"));
 
@@ -284,7 +271,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config), persistenceId);
+                Map.of(), config), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -295,7 +282,7 @@ public class RaftActorTest extends AbstractActorTest {
         mockRaftActor.setRaftActorRecoverySupport(mockSupport);
 
         Snapshot snapshot = Snapshot.create(ByteState.of(new byte[]{1}),
-                Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1, -1, null, null);
+                List.of(), 3, 1, 3, 1, -1, null, null);
         SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
         mockRaftActor.handleRecover(snapshotOffer);
 
@@ -336,28 +323,29 @@ public class RaftActorTest extends AbstractActorTest {
         // Wait for akka's recovery to complete so it doesn't interfere.
         mockRaftActor.waitForRecoveryComplete();
 
-        ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class));
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
+        ApplySnapshot applySnapshot = new ApplySnapshot(
+            Snapshot.create(null, null, 0, 0, 0, 0, 0, persistenceId, null));
+        when(mockSupport.handleSnapshotMessage(same(applySnapshot), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(applySnapshot);
 
         CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(ByteState.empty(), Optional.empty());
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(captureSnapshotReply);
 
         SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(new SnapshotMetadata("", 0L, 0L));
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(saveSnapshotSuccess);
 
         SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(new SnapshotMetadata("", 0L, 0L),
                 new Throwable());
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(saveSnapshotFailure);
 
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
-                any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
+            any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
 
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(GetSnapshot.INSTANCE);
 
         verify(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
@@ -381,7 +369,7 @@ public class RaftActorTest extends AbstractActorTest {
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+                Map.of(), config, dataPersistenceProvider), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -405,7 +393,7 @@ public class RaftActorTest extends AbstractActorTest {
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+                Map.of(), config, dataPersistenceProvider), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -518,7 +506,7 @@ public class RaftActorTest extends AbstractActorTest {
         String persistenceId = factory.generateActorId("notifier-");
 
         factory.createActor(MockRaftActor.builder().id(persistenceId)
-                .peerAddresses(ImmutableMap.of("leader", "fake/path"))
+                .peerAddresses(Map.of("leader", "fake/path"))
                 .config(config).roleChangeNotifier(notifierActor).props());
 
         List<RoleChanged> matches =  null;
@@ -560,8 +548,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(follower1Id, followerActor1.path().toString());
+        Map<String, String> peerAddresses = Map.of(follower1Id, followerActor1.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -607,7 +594,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         assertEquals(8, leaderActor.getReplicatedLog().size());
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
@@ -649,8 +636,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(leaderId, leaderActor1.path().toString());
+        Map<String, String> peerAddresses = Map.of(leaderId, leaderActor1.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -683,15 +669,15 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(6, followerActor.getReplicatedLog().size());
 
         //fake snapshot on index 6
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                (ReplicatedLogEntry) new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
+        List<ReplicatedLogEntry> entries = List.of(
+                new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
         followerActor.handleCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5, (short)0));
         assertEquals(7, followerActor.getReplicatedLog().size());
 
         //fake snapshot on index 7
         assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
 
-        entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(7, 1,
+        entries = List.of(new SimpleReplicatedLogEntry(7, 1,
                 new MockRaftActorContext.MockPayload("foo-7")));
         followerActor.handleCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6, (short) 0));
         assertEquals(8, followerActor.getReplicatedLog().size());
@@ -699,7 +685,7 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
 
 
-        ByteString snapshotBytes = fromObject(Arrays.asList(
+        ByteString snapshotBytes = fromObject(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
@@ -716,8 +702,7 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
         assertEquals(7, followerActor.getReplicatedLog().lastIndex());
 
-        entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(8, 1,
-                new MockRaftActorContext.MockPayload("foo-7")));
+        entries = List.of(new SimpleReplicatedLogEntry(8, 1, new MockRaftActorContext.MockPayload("foo-7")));
         // send an additional entry 8 with leaderCommit = 7
         followerActor.handleCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7, (short) 0));
 
@@ -740,9 +725,9 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(follower1Id, followerActor1.path().toString());
-        peerAddresses.put(follower2Id, followerActor2.path().toString());
+        Map<String, String> peerAddresses = Map.of(
+            follower1Id, followerActor1.path().toString(),
+            follower2Id, followerActor2.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -791,7 +776,7 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals("Fake snapshot should not happen when Initiate is in progress", 5,
                 leaderActor.getReplicatedLog().size());
 
-        ByteString snapshotBytes = fromObject(Arrays.asList(
+        ByteString snapshotBytes = fromObject(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
@@ -819,7 +804,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -863,7 +848,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -909,7 +894,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().build();
+        Map<String, String> peerAddresses = Map.of();
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -963,8 +948,7 @@ public class RaftActorTest extends AbstractActorTest {
     public void testUpdateConfigParam() {
         DefaultConfigParamsImpl emptyConfig = new DefaultConfigParamsImpl();
         String persistenceId = factory.generateActorId("follower-");
-        ImmutableMap<String, String> peerAddresses =
-            ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> actorRef = factory.createTestActor(
@@ -1029,7 +1013,7 @@ public class RaftActorTest extends AbstractActorTest {
                 new MockRaftActorContext.MockPayload("C")));
 
         TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(), config)
+                Map.of("member1", "address"), config)
                     .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
         MockRaftActor mockRaftActor = raftActorRef.underlyingActor();
 
@@ -1105,13 +1089,13 @@ public class RaftActorTest extends AbstractActorTest {
         DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
         config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
 
-        List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
-        snapshotUnappliedEntries.add(new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
+        List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+            new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
 
         int snapshotLastApplied = 3;
         int snapshotLastIndex = 4;
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("A"),
                 new MockRaftActorContext.MockPayload("B"),
                 new MockRaftActorContext.MockPayload("C"),
@@ -1152,7 +1136,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         // Test with data persistence disabled
 
-        snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.<ReplicatedLogEntry>emptyList(),
+        snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(),
                 -1, -1, -1, -1, 5, "member-1", null);
 
         persistenceId = factory.generateActorId("test-actor-");
@@ -1182,9 +1166,9 @@ public class RaftActorTest extends AbstractActorTest {
         DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
         config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
 
-        List<MockPayload> state = Arrays.asList(new MockRaftActorContext.MockPayload("A"));
+        List<MockPayload> state = List.of(new MockRaftActorContext.MockPayload("A"));
         Snapshot snapshot = Snapshot.create(ByteState.of(fromObject(state).toByteArray()),
-                Arrays.<ReplicatedLogEntry>asList(), 5, 2, 5, 2, 2, "member-1", null);
+                List.of(), 5, 2, 5, 2, 2, "member-1", null);
 
         InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1,
                 new MockRaftActorContext.MockPayload("B")));
@@ -1220,7 +1204,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         String persistenceId = factory.generateActorId("test-actor-");
         InMemoryJournal.addEntry(persistenceId, 1,  new SimpleReplicatedLogEntry(0, 1,
-                new ServerConfigurationPayload(Arrays.asList(new ServerInfo(persistenceId, false)))));
+                new ServerConfigurationPayload(List.of(new ServerInfo(persistenceId, false)))));
 
         TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.builder().id(persistenceId)
                 .config(config).props().withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
@@ -1254,7 +1238,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         mockRaftActor.waitForInitializeBehaviorComplete();
 
-        raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, Collections.<ReplicatedLogEntry>emptyList(),
+        raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, List.of(),
                 0L, -1L, (short)1), ActorRef.noSender());
         LeaderStateChanged leaderStateChange = MessageCollectorActor.expectFirstMatching(
                 notifierActor, LeaderStateChanged.class);
@@ -1287,7 +1271,7 @@ public class RaftActorTest extends AbstractActorTest {
         doReturn(true).when(mockPersistenceProvider).isRecoveryApplicable();
 
         TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
-                MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config,
+                MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config,
                         mockPersistenceProvider), leaderId);
         MockRaftActor leaderActor = leaderActorRef.underlyingActor();
         leaderActor.waitForInitializeBehaviorComplete();
@@ -1329,7 +1313,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
-                MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config),
+                MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config),
                     leaderId);
         MockRaftActor leaderActor = leaderActorRef.underlyingActor();
         leaderActor.waitForInitializeBehaviorComplete();
@@ -1369,8 +1353,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         TestRaftActor.Builder builder = TestRaftActor.newBuilder()
                 .id(leaderId)
-                .peerAddresses(ImmutableMap.of(followerId,
-                        mockFollowerActorRef.path().toString()))
+                .peerAddresses(Map.of(followerId, mockFollowerActorRef.path().toString()))
                 .config(config)
                 .collectorActor(factory.createActor(
                         MessageCollectorActor.props(), factory.generateActorId(leaderId + "-collector")));
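
Several of the RaftActorTest hunks above switch Mockito stubbing from doReturn(...).when(...) to when(...).thenReturn(...). The two forms are equivalent for plain mocks; when(...).thenReturn(...) is checked against the method's return type at compile time, while doReturn(...) remains the form to reach for with spies, where the when(...) call would invoke the real method. A minimal sketch of the difference, using java.util.List purely as a stand-in interface:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.when;

    import java.util.ArrayList;
    import java.util.List;

    public class StubbingStylesSketch {
        @SuppressWarnings("unchecked")
        public static void main(String[] args) {
            List<String> mocked = mock(List.class);
            when(mocked.size()).thenReturn(42);     // type-checked: thenReturn must supply an int

            List<String> spied = spy(new ArrayList<>());
            doReturn(42).when(spied).size();        // stubs without calling the real size()

            System.out.println(mocked.size() + " " + spied.size());   // 42 42
        }
    }
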
index 6386d6c6ba1e7a9453161c3f67caa146e1dcd543..7d6b8988d32dbd076f5d87826e4984e0148b857a 100644
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static org.junit.Assert.fail;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.pattern.Patterns;
@@ -16,7 +18,6 @@ import akka.util.Timeout;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.slf4j.Logger;
@@ -31,7 +32,7 @@ public class RaftActorTestKit extends TestKit {
 
     public RaftActorTestKit(final ActorSystem actorSystem, final String actorName) {
         super(actorSystem);
-        raftActor = this.getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
+        raftActor = getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
     }
 
     public ActorRef getRaftActor() {
@@ -65,6 +66,6 @@ public class RaftActorTestKit extends TestKit {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
         }
 
-        Assert.fail("Leader not found for actorRef " + actorRef.path());
+        fail("Leader not found for actorRef " + actorRef.path());
     }
 }
index 7004ca88787707c7b094d4c1e520db9fed5076bb..b3da66c0ff448d045742e2a0da2ced24fb325a19 100644
@@ -12,9 +12,8 @@ import static org.junit.Assert.assertEquals;
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
 import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import java.util.List;
+import java.util.Map;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
@@ -43,7 +42,7 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
 
         String persistenceId = factory.generateActorId("singleNode");
         TestActorRef<AbstractRaftActorIntegrationTest.TestRaftActor> singleNodeActorRef =
-                newTestRaftActor(persistenceId, ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+                newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
 
         waitUntilLeader(singleNodeActorRef);
 
@@ -75,8 +74,9 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
 
         assertEquals("Last applied", 5, singleNodeContext.getLastApplied());
 
-        assertEquals("Incorrect State after snapshot success is received ", Lists.newArrayList(payload0, payload1,
-                payload2, payload3, payload4, payload5), singleNodeActorRef.underlyingActor().getState());
+        assertEquals("Incorrect State after snapshot success is received ",
+                List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+                singleNodeActorRef.underlyingActor().getState());
 
         InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
 
@@ -87,19 +87,17 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
         assertEquals(1, persistedSnapshots.size());
 
         List<Object> snapshottedState = MockRaftActor.fromState(persistedSnapshots.get(0).getState());
-        assertEquals("Incorrect Snapshot", Lists.newArrayList(payload0, payload1, payload2, payload3),
-                snapshottedState);
+        assertEquals("Incorrect Snapshot", List.of(payload0, payload1, payload2, payload3), snapshottedState);
 
         //recovery logic starts
         killActor(singleNodeActorRef);
 
-        singleNodeActorRef = newTestRaftActor(persistenceId,
-                ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+        singleNodeActorRef = newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
 
         singleNodeActorRef.underlyingActor().waitForRecoveryComplete();
 
-        assertEquals("Incorrect State after Recovery ", Lists.newArrayList(payload0, payload1, payload2, payload3,
-                payload4, payload5), singleNodeActorRef.underlyingActor().getState());
-
+        assertEquals("Incorrect State after Recovery ",
+                List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+                singleNodeActorRef.underlyingActor().getState());
     }
 }
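
The assertions above build the expected state with List.of(...) instead of Lists.newArrayList(...). List equality is element-wise, so the assertEquals comparisons are unaffected; the difference is that List.of is unmodifiable and rejects nulls, while Lists.newArrayList returns a plain mutable ArrayList (and Arrays.asList, replaced elsewhere in this change, is fixed-size but writable through set()). A short standalone sketch with placeholder payload strings:

    import java.util.Arrays;
    import java.util.List;

    public class ListFactorySketch {
        public static void main(String[] args) {
            List<String> expected = List.of("payload0", "payload1");        // unmodifiable, null-hostile
            List<String> legacy   = Arrays.asList("payload0", "payload1");  // fixed-size, allows nulls

            System.out.println(expected.equals(legacy));   // true: equality is element-wise
            legacy.set(0, "patched");                      // allowed: backed by the underlying array
            // expected.set(0, "patched");                 // would throw UnsupportedOperationException
        }
    }
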
index ca53d2e6c59658c9310dd7c559d1ea0b15226ec4..f197ba29a0e5c2ea0b1a2ce1480aad003f4bc40a 100644
@@ -11,9 +11,6 @@ import static org.junit.Assert.assertEquals;
 
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.junit.Before;
@@ -38,15 +35,12 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
     @Before
     public void setup() {
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId)),
                 newFollowerConfigParams());
 
-        Map<String, String> leaderPeerAddresses = new HashMap<>();
-        leaderPeerAddresses.put(follower1Id, follower1Actor.path().toString());
-        leaderPeerAddresses.put(follower2Id, "");
-
         leaderConfigParams = newLeaderConfigParams();
-        leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
+        leaderActor = newTestRaftActor(leaderId, Map.of(follower1Id, follower1Actor.path().toString(), follower2Id, ""),
+            leaderConfigParams);
 
         follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
         leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
@@ -96,7 +90,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
         assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
 
-        assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+        assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
                 leaderActor.underlyingActor().getState());
     }
 
@@ -135,7 +129,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
         assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
 
-        assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+        assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
                 leaderActor.underlyingActor().getState());
     }
 
@@ -146,8 +140,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         leader = leaderActor.underlyingActor().getCurrentBehavior();
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
-                newFollowerConfigParams());
+        follower2Actor = newTestRaftActor(follower2Id,
+                Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
         follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
 
         leaderActor.tell(new SetPeerAddress(follower2Id, follower2Actor.path().toString()), ActorRef.noSender());
@@ -168,8 +162,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         InMemoryJournal.clear();
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
-                newFollowerConfigParams());
+        follower2Actor = newTestRaftActor(follower2Id,
+                Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
         TestRaftActor follower2Underlying = follower2Actor.underlyingActor();
         follower2CollectorActor = follower2Underlying.collectorActor();
         follower2Context = follower2Underlying.getRaftActorContext();
@@ -182,7 +176,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         // Wait for the follower to persist the snapshot.
         MessageCollectorActor.expectFirstMatching(follower2CollectorActor, SaveSnapshotSuccess.class);
 
-        final List<MockPayload> expFollowerState = Arrays.asList(payload0, payload1, payload2);
+        final List<MockPayload> expFollowerState = List.of(payload0, payload1, payload2);
 
         assertEquals("Follower commit index", 2, follower2Context.getCommitIndex());
         assertEquals("Follower last applied", 2, follower2Context.getLastApplied());
@@ -191,7 +185,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         killActor(follower2Actor);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId)),
                 newFollowerConfigParams());
 
         follower2Underlying = follower2Actor.underlyingActor();
@@ -244,9 +238,9 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         reinstateLeaderActor();
 
-        assertEquals("Leader last index", 5 , leaderActor.underlyingActor().getReplicatedLog().lastIndex());
-        assertEquals(payload4, leaderActor.underlyingActor().getReplicatedLog().get(4).getData());
-        assertEquals(payload5, leaderActor.underlyingActor().getReplicatedLog().get(5).getData());
+        final var log = leaderActor.underlyingActor().getReplicatedLog();
+        assertEquals("Leader last index", 5, log.lastIndex());
+        assertEquals(List.of(payload4, payload5), List.of(log.get(4).getData(), log.get(5).getData()));
     }
 
     private void reinstateLeaderActor() {
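
Illustrative aside (not part of the change-set; class name and values below are made up): the hunks above replace Guava's ImmutableMap.of / Lists.newArrayList and hand-built HashMaps with the JDK's Map.of and List.of factories. The behavioural points that matter for such a migration are that the returned collections are unmodifiable and reject null keys, values and elements, whereas HashMap and Arrays.asList tolerate both mutation and nulls.

    import java.util.List;
    import java.util.Map;

    // Sketch of the JDK factory semantics relied on above (identifiers are illustrative).
    class CollectionFactoryNotes {
        public static void main(String[] args) {
            Map<String, String> peers = Map.of("follower-1", "some-actor-path");
            try {
                peers.put("follower-2", "");                      // factory maps cannot be mutated
            } catch (UnsupportedOperationException expected) {
                System.out.println("Map.of result is unmodifiable");
            }
            try {
                Map<String, String> bad = Map.of("leader", null); // nulls are rejected eagerly
            } catch (NullPointerException expected) {
                System.out.println("Map.of rejects null values");
            }
            List<String> ids = List.of("leader", "follower-1");   // likewise unmodifiable, null-free
            System.out.println(ids);
        }
    }
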
index 5b8ec2ec1590fc9c42ec09d3b1d42cc702ec19ab..542828b9d0404eceeca87410426c93a64fa3ba7b 100644 (file)
@@ -17,7 +17,7 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
 
 import akka.japi.Procedure;
 import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
+import java.util.Map;
 import java.util.function.Consumer;
 import org.junit.Before;
 import org.junit.Test;
@@ -56,7 +56,7 @@ public class ReplicatedLogImplTest {
     @Before
     public void setup() {
         context = new RaftActorContextImpl(null, null, "test",
-                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.emptyMap(),
+                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(),
                 configParams, mockPersistence, applyState -> { }, LOG,  MoreExecutors.directExecutor());
     }
 
index 050b0ddf35d0391a64e47577b391e3d95918a471..70f67425c3951c987c982999b664d1ae17d37625 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
 import java.util.List;
+import java.util.Map;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -67,15 +67,15 @@ public class ReplicationAndSnapshotsIntegrationTest extends AbstractRaftActorInt
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), followerConfigParams);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), followerConfigParams);
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
index d6a53a0aeeb6e84b86d457b3ca667dbaee111101..f2658957e1e173d66e30839cd3938ade1c57f870 100644 (file)
@@ -8,21 +8,20 @@
 package org.opendaylight.controller.cluster.raft;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
 import org.eclipse.jdt.annotation.Nullable;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
@@ -61,15 +60,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(initialTerm, leaderId));
 
         // Create the leader and 2 follower actors.
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), newFollowerConfigParams());
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
 
-        Map<String, String> leaderPeerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        Map<String, String> leaderPeerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
@@ -86,7 +85,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         follower2 = follower2Actor.underlyingActor().getCurrentBehavior();
 
         currentTerm = leaderContext.getTermInformation().getCurrentTerm();
-        assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm);
+        assertTrue("Current term > " + initialTerm, currentTerm > initialTerm);
 
         leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
         follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
@@ -96,7 +95,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
     }
 
     private void setupFollower2() {
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
 
         follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
@@ -169,7 +168,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // to catch it up because no snapshotting was done so the follower's next index was present in the log.
         InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
-        Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+        assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
 
         testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries complete");
     }
@@ -254,7 +253,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify the leader did not try to install a snapshot to catch up follower 2.
         InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
-        Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+        assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
 
         // Ensure there's at least 1 more heartbeat.
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -364,7 +363,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
 
         // Send a server config change to test that the install snapshot includes the server config.
 
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true),
                 new ServerInfo(follower1Id, false),
                 new ServerInfo(follower2Id, false)));
@@ -452,7 +451,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
 
         setupFollower2();
 
-        MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 5);
+        MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 1);
 
         follower2Actor.stop();
 
@@ -508,7 +507,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify a snapshot is not triggered.
         CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor,
                 CaptureSnapshot.class);
-        Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+        assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
 
         expSnapshotState.add(payload1);
 
@@ -581,7 +580,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
 
         captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
-        Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+        assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
 
         // Verify the follower 1 applies the state.
         applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
@@ -613,8 +612,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
     /**
      * Resume the lagging follower 2 and verify it receives an install snapshot from the leader.
      */
-    private void verifyInstallSnapshotToLaggingFollower(long lastAppliedIndex,
-            @Nullable ServerConfigurationPayload expServerConfig) {
+    private void verifyInstallSnapshotToLaggingFollower(final long lastAppliedIndex,
+            final @Nullable ServerConfigurationPayload expServerConfig) {
         testLog.info("verifyInstallSnapshotToLaggingFollower starting");
 
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -633,15 +632,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // This is OK - the next snapshot should delete it. In production, even if the system restarted
         // before another snapshot, they would both get applied which wouldn't hurt anything.
         List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
-        Assert.assertTrue("Expected at least 1 persisted snapshots", persistedSnapshots.size() > 0);
+        assertFalse("Expected at least 1 persisted snapshots", persistedSnapshots.isEmpty());
         Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);
         verifySnapshot("Persisted", persistedSnapshot, currentTerm, lastAppliedIndex, currentTerm, lastAppliedIndex);
         List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
         assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, unAppliedEntry.size());
 
         int snapshotSize = SerializationUtils.serialize(persistedSnapshot.getState()).length;
-        final int expTotalChunks = snapshotSize / SNAPSHOT_CHUNK_SIZE
-                + (snapshotSize % SNAPSHOT_CHUNK_SIZE > 0 ? 1 : 0);
+        final int expTotalChunks = snapshotSize / MAXIMUM_MESSAGE_SLICE_SIZE
+                + (snapshotSize % MAXIMUM_MESSAGE_SLICE_SIZE > 0 ? 1 : 0);
 
         InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
@@ -660,7 +659,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
             assertEquals("InstallSnapshotReply getTerm", currentTerm, installSnapshotReply.getTerm());
             assertEquals("InstallSnapshotReply getChunkIndex", index++, installSnapshotReply.getChunkIndex());
             assertEquals("InstallSnapshotReply getFollowerId", follower2Id, installSnapshotReply.getFollowerId());
-            assertEquals("InstallSnapshotReply isSuccess", true, installSnapshotReply.isSuccess());
+            assertTrue("InstallSnapshotReply isSuccess", installSnapshotReply.isSuccess());
         }
 
         // Verify follower 2 applies the snapshot.
@@ -683,18 +682,18 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         verifyLeadersTrimmedLog(lastAppliedIndex);
 
         if (expServerConfig != null) {
-            Set<ServerInfo> expServerInfo = new HashSet<>(expServerConfig.getServerConfig());
+            Set<ServerInfo> expServerInfo = Set.copyOf(expServerConfig.getServerConfig());
             assertEquals("Leader snapshot server config", expServerInfo,
-                    new HashSet<>(persistedSnapshot.getServerConfiguration().getServerConfig()));
+                Set.copyOf(persistedSnapshot.getServerConfiguration().getServerConfig()));
 
             assertEquals("Follower 2 snapshot server config", expServerInfo,
-                    new HashSet<>(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
+                Set.copyOf(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
 
             ServerConfigurationPayload follower2ServerConfig = follower2Context.getPeerServerInfo(true);
             assertNotNull("Follower 2 server config is null", follower2ServerConfig);
 
             assertEquals("Follower 2 server config", expServerInfo,
-                    new HashSet<>(follower2ServerConfig.getServerConfig()));
+                Set.copyOf(follower2ServerConfig.getServerConfig()));
         }
 
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -765,8 +764,9 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify the leaders's persisted journal log - it should only contain the last 2 ReplicatedLogEntries
         // added after the snapshot as the persisted journal should've been purged to the snapshot
         // sequence number.
-        verifyPersistedJournal(leaderId, Arrays.asList(new SimpleReplicatedLogEntry(5, currentTerm, payload5),
-                new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
+        verifyPersistedJournal(leaderId, List.of(
+            new SimpleReplicatedLogEntry(5, currentTerm, payload5),
+            new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
 
         // Verify the leaders's persisted journal contains an ApplyJournalEntries for at least the last entry index.
         List<ApplyJournalEntries> persistedApplyJournalEntries =
@@ -779,8 +779,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
             }
         }
 
-        Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 6),
-                found);
+        assertTrue("ApplyJournalEntries with index 6 not found in leader's persisted journal", found);
 
         // Verify follower 1 applies the 3 log entries.
         applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
@@ -811,8 +810,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
     /**
      * Kill the leader actor, reinstate it and verify the recovered journal.
      */
-    private void verifyLeaderRecoveryAfterReinstatement(long lastIndex, long snapshotIndex,
-            long firstJournalEntryIndex) {
+    private void verifyLeaderRecoveryAfterReinstatement(final long lastIndex, final long snapshotIndex,
+            final long firstJournalEntryIndex) {
         testLog.info("verifyLeaderRecoveryAfterReinstatement starting: lastIndex: {}, snapshotIndex: {}, "
             + "firstJournalEntryIndex: {}", lastIndex, snapshotIndex, firstJournalEntryIndex);
 
@@ -845,8 +844,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         testLog.info("verifyLeaderRecoveryAfterReinstatement ending");
     }
 
-    private void sendInitialPayloadsReplicatedToAllFollowers(String... data) {
-
+    private void sendInitialPayloadsReplicatedToAllFollowers(final String... data) {
         // Send the payloads.
         for (String d: data) {
             expSnapshotState.add(sendPayloadData(leaderActor, d));
@@ -855,25 +853,27 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         int numEntries = data.length;
 
         // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
-        List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
-                ApplyState.class, numEntries);
+        final var leaderStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(leaderStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
         }
 
         // Verify follower 1 applies each log entry.
-        applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, numEntries);
+        final var follower1States = MessageCollectorActor.expectMatching(follower1CollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(follower1States.get(i), null, null, currentTerm, i, payload);
         }
 
         // Verify follower 2 applies each log entry.
-        applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, numEntries);
+        final var follower2States = MessageCollectorActor.expectMatching(follower2CollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(follower2States.get(i), null, null, currentTerm, i, payload);
         }
 
         // Ensure there's at least 1 more heartbeat.
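
A worked aside, not taken from the patch: the expTotalChunks expression in the verifyInstallSnapshotToLaggingFollower hunk above is plain ceiling division, carving the serialized snapshot into full MAXIMUM_MESSAGE_SLICE_SIZE chunks plus one partial chunk when there is a remainder. A sketch with made-up sizes and a made-up helper name:

    final class ChunkMath {
        // Same arithmetic as the expTotalChunks assertion above (name and sizes are illustrative).
        static int expectedChunks(final int snapshotSize, final int maximumMessageSliceSize) {
            return snapshotSize / maximumMessageSliceSize
                    + (snapshotSize % maximumMessageSliceSize > 0 ? 1 : 0);
        }
        // expectedChunks(25, 10) == 3 and expectedChunks(30, 10) == 3, the same value as the
        // classic integer form (snapshotSize + maximumMessageSliceSize - 1) / maximumMessageSliceSize.
    }
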
index fda95eaa1d2f4a254240fe24aea6456fb32de767..dcbc8179a6e4e62dcd93cd31a2c4166b8743d10b 100644 (file)
@@ -9,8 +9,8 @@ package org.opendaylight.controller.cluster.raft;
 
 import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching;
 
-import com.google.common.collect.ImmutableMap;
 import java.util.List;
+import java.util.Map;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -29,19 +29,19 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct
 
         // Create the leader and 2 follower actors.
 
-        snapshotChunkSize = 20;
+        maximumMessageSliceSize = 20;
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), followerConfigParams);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), followerConfigParams);
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
@@ -58,11 +58,11 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct
 
         // Send a large payload that exceeds the size threshold and needs to be sliced.
 
-        MockPayload largePayload = sendPayloadData(leaderActor, "large", snapshotChunkSize + 1);
+        MockPayload largePayload = sendPayloadData(leaderActor, "large", maximumMessageSliceSize + 1);
 
         // Then send a small payload that does not need to be sliced.
 
-        MockPayload smallPayload = sendPayloadData(leaderActor, "normal", snapshotChunkSize - 1);
+        MockPayload smallPayload = sendPayloadData(leaderActor, "normal", maximumMessageSliceSize - 1);
 
         final List<ApplyState> leaderApplyState = expectMatching(leaderCollectorActor, ApplyState.class, 2);
         verifyApplyState(leaderApplyState.get(0), leaderCollectorActor,
index 9463a113e6a9c369904aa3f3fe055a9dcd88635d..aa4a44b97c13803d543ae553036e77ad72b7541c 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -26,7 +25,7 @@ import static org.mockito.Mockito.verify;
 import akka.actor.ActorRef;
 import akka.persistence.SnapshotSelectionCriteria;
 import java.io.OutputStream;
-import java.util.Arrays;
+import java.util.List;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.junit.After;
@@ -257,7 +256,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
                 8L, 2L, new MockRaftActorContext.MockPayload());
 
         doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
-        doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
+        doReturn(List.of(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
 
         // when replicatedToAllIndex = -1
         snapshotManager.capture(lastLogEntry, -1);
@@ -275,7 +274,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
         assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
         assertEquals("getState", snapshotState, snapshot.getState());
-        assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
+        assertEquals("getUnAppliedEntries", List.of(lastLogEntry), snapshot.getUnAppliedEntries());
         assertEquals("electionTerm", mockElectionTerm.getCurrentTerm(), snapshot.getElectionTerm());
         assertEquals("electionVotedFor", mockElectionTerm.getVotedFor(), snapshot.getElectionVotedFor());
 
@@ -378,7 +377,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         Optional<OutputStream> installSnapshotStream = installSnapshotStreamCapture.getValue();
         assertEquals("isPresent", true, installSnapshotStream.isPresent());
 
-        installSnapshotStream.get().write(snapshotState.getBytes());
+        installSnapshotStream.orElseThrow().write(snapshotState.getBytes());
 
         snapshotManager.persist(snapshotState, installSnapshotStream, Runtime.getRuntime().totalMemory());
 
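
Brief aside on the Optional change above (example values are made up): on java.util.Optional the no-argument orElseThrow() behaves identically to get(), throwing NoSuchElementException when the value is absent; the replacement simply names the failure mode.

    import java.util.Optional;

    // get() and the no-argument orElseThrow() are equivalent; both throw
    // NoSuchElementException on an empty Optional (illustrative values only).
    final class OptionalNote {
        public static void main(String[] args) {
            Optional<String> present = Optional.of("snapshot-stream");
            System.out.println(present.orElseThrow());            // prints "snapshot-stream"
            Optional<String> empty = Optional.empty();
            try {
                empty.orElseThrow();
            } catch (java.util.NoSuchElementException expected) {
                System.out.println("empty Optional throws on orElseThrow()");
            }
        }
    }
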
index 50a3c98131a4f06bbde84a5346386d74e379de0e..96f4fe8c6e5172126291ed7358526b403d4a25f2 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static org.junit.Assert.assertTrue;
+
 import akka.actor.Actor;
 import akka.actor.ActorIdentity;
 import akka.actor.ActorRef;
@@ -23,10 +25,9 @@ import akka.util.Timeout;
 import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.time.Duration;
-import java.util.LinkedList;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -49,7 +50,7 @@ public class TestActorFactory implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(TestActorFactory.class);
 
     private final ActorSystem system;
-    List<ActorRef> createdActors = new LinkedList<>();
+    private final List<ActorRef> createdActors = new ArrayList<>();
     private static int actorCount = 1;
 
     public TestActorFactory(final ActorSystem system) {
@@ -152,7 +153,7 @@ public class TestActorFactory implements AutoCloseable {
                 ActorSelection actorSelection = system.actorSelection(actorRef.path().toString());
                 Future<Object> future = Patterns.ask(actorSelection, new Identify(""), timeout);
                 ActorIdentity reply = (ActorIdentity)Await.result(future, timeout.duration());
-                Assert.assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
+                assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
                 return;
             } catch (Exception | AssertionError e) {
                 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
index 26cdb22d8ccc42569dc04bddf4d5ad9da7bb6cb9..2a58dd1d4f15aa0be51c1dfe536de028fe407a2c 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.base.messages;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,10 +19,11 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class TimeoutNowTest {
-
     @Test
     public void test() {
-        TimeoutNow cloned = (TimeoutNow) SerializationUtils.clone(TimeoutNow.INSTANCE);
+        final var bytes = SerializationUtils.serialize(TimeoutNow.INSTANCE);
+        assertEquals(86, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("Cloned instance", TimeoutNow.INSTANCE, cloned);
     }
 }
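
Aside on the rewritten test above (the class below is an illustrative sketch, not the real TimeoutNow): the test now serializes TimeoutNow.INSTANCE, pins the serialized size at 86 bytes, and asserts that deserialization hands back the very same instance. For assertSame to hold across a serialize/deserialize round trip, the class (or its serialization proxy) has to resolve back to the canonical instance, conventionally via readResolve():

    import java.io.Serializable;

    // Minimal singleton-with-readResolve sketch (made-up name, assumed pattern).
    final class ExampleSingletonMessage implements Serializable {
        private static final long serialVersionUID = 1L;

        static final ExampleSingletonMessage INSTANCE = new ExampleSingletonMessage();

        private ExampleSingletonMessage() {
            // stateless, so one shared instance is enough
        }

        private Object readResolve() {
            // Replaces the freshly deserialized copy with the canonical instance,
            // which is what allows assertSame(INSTANCE, deserialized) to pass.
            return INSTANCE;
        }
    }

Checking the byte length as well is a cheap guard against unintentional changes to the serialized form.
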
index 7695d05133e6cd18e4d97a35282d9931995a2f01..3497840b386d650600d7926fac0d421d28fdb1d8 100644 (file)
@@ -32,12 +32,12 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
index a9305a6862f32f3737a194ad6ef601f6e77baff0..d2aa7d013cc5ac374291192d841bbbf81c9b7ab9 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class FollowerIdentifierTest {
-
     @Test
     public void testSerialization() {
-        FollowerIdentifier expected = new FollowerIdentifier("follower1");
-        FollowerIdentifier cloned = (FollowerIdentifier) SerializationUtils.clone(expected);
+        final var expected = new FollowerIdentifier("follower1");
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(87, bytes.length);
+        final var cloned = (FollowerIdentifier) SerializationUtils.deserialize(bytes);
         assertEquals("cloned", expected, cloned);
     }
 }
index a51134676a16ff81477eae2a5f092b54f684ff8b..8006d5a6ac906ed6a11598b59f5c6aba6269f109 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.raft.behaviors;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -25,21 +26,15 @@ import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.MockRaftActor;
@@ -157,7 +152,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
                 .getElectionTimeOutInterval().toMillis() - 100, TimeUnit.MILLISECONDS);
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
                 -1, -1, (short) 1));
 
         Uninterruptibles.sleepUninterruptibly(130, TimeUnit.MILLISECONDS);
@@ -166,7 +161,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
                 .getElectionTimeOutInterval().toMillis() - 150, TimeUnit.MILLISECONDS);
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
                 -1, -1, (short) 1));
 
         Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
@@ -221,10 +216,9 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().append(newReplicatedLogEntry(1,100, "bar"));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
-        Assert.assertEquals(1, context.getReplicatedLog().size());
+        assertEquals(1, context.getReplicatedLog().size());
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -246,8 +240,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -272,8 +265,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().append(newReplicatedLogEntry(1, 100, "bar"));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -297,8 +289,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().clear(0,2);
         context.getReplicatedLog().setSnapshotIndex(100);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -323,8 +314,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().clear(0,2);
         context.getReplicatedLog().setSnapshotIndex(100);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 105, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 105, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 105, 100, (short) 0);
@@ -346,8 +336,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -367,7 +356,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setCommitIndex(101);
         setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
@@ -394,8 +383,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -415,8 +403,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 100,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // leader-2 is becoming the leader now and it says the commitIndex is 45
         appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
@@ -434,8 +421,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -456,8 +442,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 101,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
@@ -474,8 +459,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 100,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // leader-2 is becoming the leader now and it says the commitIndex is 45
         appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
@@ -504,8 +488,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
                 new MockRaftActorContext.MockPayload(""));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.<ReplicatedLogEntry>asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -527,13 +510,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, Collections.emptyList(), 101, -1, (short)0);
+        AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, List.of(), 101, -1, (short)0);
 
         follower = createBehavior(context);
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
                 AppendEntriesReply.class);
@@ -550,13 +533,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().setSnapshotIndex(4);
         context.getReplicatedLog().setSnapshotTerm(3);
 
-        AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, Collections.emptyList(), 8, -1, (short)0);
+        AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, List.of(), 8, -1, (short)0);
 
         follower = createBehavior(context);
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
 
@@ -587,9 +570,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 3, "three"));
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(1, 3, "three"), newReplicatedLogEntry(1, 4, "four"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -603,7 +585,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         assertEquals("Next index", 5, log.last().getIndex() + 1);
         assertEquals("Entry 3", entries.get(0), log.get(3));
@@ -639,9 +621,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(2, 2, "two-1"));
-        entries.add(newReplicatedLogEntry(2, 3, "three"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -653,7 +634,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         // The entry at index 2 will be found out-of-sync with the leader
         // and will be removed
@@ -690,9 +671,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(2, 2, "two-1"));
-        entries.add(newReplicatedLogEntry(2, 3, "three"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -705,7 +685,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(2, false, context.getId(), 1, 2, true);
     }
@@ -725,8 +705,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, -1, (short)0);
 
@@ -734,7 +713,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(1, false, context.getId(), 1, 2);
     }
@@ -755,7 +734,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Send the last entry again.
-        List<ReplicatedLogEntry> entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 1, "one"));
 
         follower = createBehavior(context);
 
@@ -768,7 +747,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send the last entry again and also a new one.
 
-        entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
+        entries = List.of(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
 
         MessageCollectorActor.clearMessages(leaderActor);
         follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 2, -1, (short)0));
@@ -796,8 +775,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, 3, (short)0);
 
@@ -805,7 +783,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4);
     }
@@ -853,7 +831,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
                 snapshot.getLastAppliedIndex());
         assertEquals("getLastTerm", lastInstallSnapshot.getLastIncludedTerm(), snapshot.getLastTerm());
         assertEquals("getState type", ByteState.class, snapshot.getState().getClass());
-        Assert.assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
+        assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
         assertEquals("getElectionTerm", 1, snapshot.getElectionTerm());
         assertEquals("getElectionVotedFor", "leader", snapshot.getElectionVotedFor());
         applySnapshot.getCallback().onSuccess();
@@ -907,7 +885,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send an append entry
         AppendEntries appendEntries = new AppendEntries(1, "leader", 1, 1,
-                Arrays.asList(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
+                List.of(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
 
         follower.handleMessage(leaderActor, appendEntries);
 
@@ -950,7 +928,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send appendEntries with a new term and leader.
         AppendEntries appendEntries = new AppendEntries(2, "new-leader", 1, 1,
-                Arrays.asList(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
+                List.of(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
 
         follower.handleMessage(leaderActor, appendEntries);
 
@@ -1004,8 +982,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 101,
                 new MockRaftActorContext.MockPayload(""));
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader", 101, 1, entries, 102, 101, (short)0);
@@ -1081,7 +1058,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
     @Test
     public void testFollowerSchedulesElectionIfNonVoting() {
         MockRaftActorContext context = createActorContext();
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo(context.getId(), false))));
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo(context.getId(), false))));
         ((DefaultConfigParamsImpl)context.getConfigParams()).setHeartBeatInterval(
                 FiniteDuration.apply(100, TimeUnit.MILLISECONDS));
         ((DefaultConfigParamsImpl)context.getConfigParams()).setElectionTimeoutFactor(1);
@@ -1132,7 +1109,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1142,7 +1119,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", -1, -1, entries, 1, -1, (short)0);
@@ -1169,7 +1146,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 1, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 1, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData()),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData()),
                 MockRaftActor.fromState(snapshot.getState()));
     }
 
@@ -1187,7 +1164,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1197,7 +1174,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
                 newReplicatedLogEntry(1, 2, "three"));
 
@@ -1225,7 +1202,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 2, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData(),
                 entries.get(2).getData()), MockRaftActor.fromState(snapshot.getState()));
 
         assertEquals("Journal size", 0, followerRaftActor.get().getReplicatedLog().size());
@@ -1244,7 +1221,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Last index", 2, followerRaftActor.get().getReplicatedLog().lastIndex());
         assertEquals("Last applied index", 2, followerRaftActor.get().getRaftActorContext().getLastApplied());
         assertEquals("Commit index", 2, followerRaftActor.get().getRaftActorContext().getCommitIndex());
-        assertEquals("State", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+        assertEquals("State", List.of(entries.get(0).getData(), entries.get(1).getData(),
                 entries.get(2).getData()), followerRaftActor.get().getState());
     }
 
@@ -1262,7 +1239,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1272,7 +1249,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
                 newReplicatedLogEntry(1, 2, "three"));
 
@@ -1302,7 +1279,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 0, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData()),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData()),
                 MockRaftActor.fromState(snapshot.getState()));
     }
 
@@ -1318,7 +1295,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         follower = createBehavior(context);
 
         follower.handleMessage(leaderActor,
-                new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1, (short)0));
+                new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short)0));
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
         assertTrue(reply.isNeedsLeaderAddress());
@@ -1327,7 +1304,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         PeerAddressResolver mockResolver = mock(PeerAddressResolver.class);
         ((DefaultConfigParamsImpl)context.getConfigParams()).setPeerAddressResolver(mockResolver);
 
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1,
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1,
                 (short)0, RaftVersions.CURRENT_VERSION, leaderActor.path().toString()));
 
         reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
@@ -1370,10 +1347,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         int size = chunkSize;
         if (chunkSize > snapshotLength) {
             size = snapshotLength;
-        } else {
-            if (start + chunkSize > snapshotLength) {
-                size = snapshotLength - start;
-            }
+        } else if (start + chunkSize > snapshotLength) {
+            size = snapshotLength - start;
         }
 
         byte[] nextChunk = new byte[size];
@@ -1410,12 +1385,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
     }
 
     private ByteString createSnapshot() {
-        HashMap<String, String> followerSnapshot = new HashMap<>();
-        followerSnapshot.put("1", "A");
-        followerSnapshot.put("2", "B");
-        followerSnapshot.put("3", "C");
-
-        return toByteString(followerSnapshot);
+        return toByteString(Map.of("1", "A", "2", "B", "3", "C"));
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java
new file mode 100644 (file)
index 0000000..aa07181
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Objects;
+import org.junit.Test;
+
+public class LeaderInstallSnapshotStateTest {
+    // Prime number on purpose, so it does not evenly divide SIZE below
+    private static final int CHUNK_SIZE = 9_999_991;
+    // More than Integer.MAX_VALUE
+    private static final long SIZE = 4_294_967_294L;
+
+    @Test
+    public void testSnapshotLongerThanInteger() throws IOException {
+        try (var fts = new LeaderInstallSnapshotState(CHUNK_SIZE, "test")) {
+            fts.setSnapshotBytes(new MockByteSource(SIZE));
+
+            int chunkIndex = 0;
+            long offset = 0;
+            long expectedChunkSize = CHUNK_SIZE;
+            while (offset < SIZE) {
+                offset = offset + CHUNK_SIZE;
+                if (offset > SIZE) {
+                    // We reached the last chunk
+                    expectedChunkSize = CHUNK_SIZE - (offset - SIZE);
+                    offset = SIZE;
+                }
+                chunkIndex++;
+                final byte[] chunk = fts.getNextChunk();
+                assertEquals("byte size not matching for chunk:", expectedChunkSize, chunk.length);
+                assertEquals("chunk index not matching", chunkIndex, fts.getChunkIndex());
+                fts.markSendStatus(true);
+                if (!fts.isLastChunk(chunkIndex)) {
+                    fts.incrementChunkIndex();
+                }
+            }
+
+            assertEquals("totalChunks not matching", chunkIndex, fts.getTotalChunks());
+        }
+    }
+
+    private static final class MockByteSource extends ByteSource {
+        private final long size;
+
+        private MockByteSource(final long size) {
+            this.size = size;
+        }
+
+        @Override
+        public long size() {
+            return size;
+        }
+
+        @Override
+        public InputStream openStream() {
+            return new MockInputStream(size);
+        }
+    }
+
+    private static final class MockInputStream extends InputStream {
+        private long remaining;
+
+        MockInputStream(final long size) {
+            remaining = size;
+        }
+
+        @Override
+        public int read() {
+            if (remaining > 0) {
+                remaining--;
+                return 0;
+            }
+            return -1;
+        }
+
+        @Override
+        public int read(final byte[] bytes, final int off, final int len) {
+            Objects.checkFromIndexSize(off, len, bytes.length);
+            if (remaining <= 0) {
+                return -1;
+            }
+            final int count = len <= remaining ? len : (int) remaining;
+            Arrays.fill(bytes, off, off + count, (byte) 0);
+            remaining -= count;
+            return count;
+        }
+    }
+}
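
The new LeaderInstallSnapshotStateTest above feeds a 4,294,967,294-byte mock snapshot through 9,999,991-byte chunks, so the offset arithmetic runs past Integer.MAX_VALUE and the final chunk is a partial one. The expected totals can also be computed directly; the sketch below is illustrative only, and ChunkMath, totalChunks and lastChunkSize are names made up here, not part of this patch.

    // Illustrative sketch only -- ChunkMath is not a class in this patch.
    final class ChunkMath {
        private ChunkMath() {
            // utility class
        }

        // Number of chunks needed for totalSize bytes split into chunkSize-byte pieces.
        static long totalChunks(final long totalSize, final int chunkSize) {
            return (totalSize + chunkSize - 1) / chunkSize;
        }

        // Size of the final chunk; equals chunkSize only when the split is exact.
        static long lastChunkSize(final long totalSize, final int chunkSize) {
            final long remainder = totalSize % chunkSize;
            return remainder == 0 ? chunkSize : remainder;
        }
    }

    // For SIZE = 4_294_967_294L and CHUNK_SIZE = 9_999_991 this yields 430 chunks
    // with a final chunk of 4_971_155 bytes, matching what the loop in
    // testSnapshotLongerThanInteger asserts chunk by chunk.
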
index d9a5487e556171813bf47d49af2a18a85b0b165c..0f16f92c4951ef4c018e9aeebe977d8ce447e754 100644 (file)
@@ -25,18 +25,16 @@ import akka.actor.Terminated;
 import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.SerializationUtils;
@@ -65,6 +63,7 @@ import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
@@ -73,7 +72,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.policy.DefaultRaftPolicy;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.yangtools.concepts.Identifier;
@@ -170,9 +168,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
     private RaftActorBehavior sendReplicate(final MockRaftActorContext actorContext, final long term, final long index,
             final Payload payload) {
-        SimpleReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(index, term, payload);
-        actorContext.getReplicatedLog().append(newEntry);
-        return leader.handleMessage(leaderActor, new Replicate(null, null, newEntry, true));
+        actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(index, term, payload));
+        return leader.handleMessage(leaderActor, new Replicate(index, true, null, null));
     }
 
     @Test
@@ -403,7 +400,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
             final int messageNr) {
         final AppendEntries commitReq = allMessages.get(2 * messageNr + 1);
         assertEquals(lastIndex + messageNr + 1, commitReq.getLeaderCommit());
-        assertEquals(ImmutableList.of(), commitReq.getEntries());
+        assertEquals(List.of(), commitReq.getEntries());
     }
 
     private static void assertRequestEntry(final long lastIndex, final List<AppendEntries> allMessages,
@@ -548,16 +545,14 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setLastApplied(0);
 
-        long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
-        long term = actorContext.getTermInformation().getCurrentTerm();
-        ReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(
-                newLogIndex, term, new MockRaftActorContext.MockPayload("foo"));
+        final long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
+        final long term = actorContext.getTermInformation().getCurrentTerm();
+        final var data = new MockRaftActorContext.MockPayload("foo");
 
-        actorContext.getReplicatedLog().append(newEntry);
+        actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(newLogIndex, term, data));
 
         final Identifier id = new MockIdentifier("state-id");
-        RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
-                new Replicate(leaderActor, id, newEntry, true));
+        final var raftBehavior = leader.handleMessage(leaderActor, new Replicate(newLogIndex, true, leaderActor, id));
 
         // State should not change
         assertTrue(raftBehavior instanceof Leader);
@@ -566,8 +561,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         // We should get 2 ApplyState messages - 1 for new log entry and 1 for the previous
         // one since lastApplied state is 0.
-        List<ApplyState> applyStateList = MessageCollectorActor.getAllMatching(
-                leaderActor, ApplyState.class);
+        final var applyStateList = MessageCollectorActor.getAllMatching(leaderActor, ApplyState.class);
         assertEquals("ApplyState count", newLogIndex, applyStateList.size());
 
         for (int i = 0; i <= newLogIndex - 1; i++) {
@@ -577,7 +571,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         ApplyState last = applyStateList.get((int) newLogIndex - 1);
-        assertEquals("getData", newEntry.getData(), last.getReplicatedLogEntry().getData());
+        assertEquals("getData", data, last.getReplicatedLogEntry().getData());
         assertEquals("getIdentifier", id, last.getIdentifier());
     }
 
@@ -587,11 +581,6 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         final MockRaftActorContext actorContext = createActorContextWithFollower();
 
-        Map<String, String> leadersSnapshot = new HashMap<>();
-        leadersSnapshot.put("1", "A");
-        leadersSnapshot.put("2", "B");
-        leadersSnapshot.put("3", "C");
-
         //clears leaders log
         actorContext.getReplicatedLog().removeFrom(0);
 
@@ -614,12 +603,12 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
-        ByteString bs = toByteString(leadersSnapshot);
+        ByteString bs = toByteString(Map.of("1", "A", "2", "B", "3", "C"));
         leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
                 -1, null, null), ByteSource.wrap(bs.toByteArray())));
         LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
-                actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+                actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
         fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
         leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
 
@@ -677,18 +666,15 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
 
         // new entry
-        SimpleReplicatedLogEntry entry =
-                new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                        new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
         // this should invoke sendInstallSnapshot as followersLastIndex < snapshotIndex
         RaftActorBehavior raftBehavior = leader.handleMessage(
-                leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+                leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertTrue(raftBehavior instanceof Leader);
 
@@ -725,15 +711,13 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.setSnapshotHolder(null);
 
         // new entry
-        SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
 
@@ -745,7 +729,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         assertEquals(2, cs.getLastTerm());
 
         // if an initiate is started again while the first is in progress, it shouldn't initiate another Capture
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
     }
@@ -788,10 +772,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         // new entry
-        SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
@@ -815,7 +797,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         // Sending Replicate message should not initiate another capture since the first is in progress.
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
         assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
 
         // Similarly sending another AppendEntriesReply to force a snapshot should not initiate another capture.
@@ -825,7 +807,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         // Now simulate the CaptureSnapshotReply to initiate snapshot install - the first chunk should be sent.
         final byte[] bytes = new byte[]{1, 2, 3};
-        installSnapshotStream.get().get().write(bytes);
+        installSnapshotStream.get().orElseThrow().write(bytes);
         actorContext.getSnapshotManager().persist(ByteState.of(bytes), installSnapshotStream.get(),
                 Runtime.getRuntime().totalMemory());
         MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
@@ -873,7 +855,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.getFollower(FOLLOWER_ID).setNextIndex(0);
 
         byte[] bytes = toByteString(leadersSnapshot).toByteArray();
-        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
                 lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
 
         RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
@@ -925,7 +907,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.getFollower(FOLLOWER_ID).setNextIndex(-1);
 
         byte[] bytes = toByteString(leadersSnapshot).toByteArray();
-        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
                 lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
 
         RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
@@ -980,10 +962,10 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
                 -1, null, null), ByteSource.wrap(bs.toByteArray())));
         LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
-                actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+                actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
         fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
         leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
         while (!fts.isLastChunk(fts.getChunkIndex())) {
@@ -1021,7 +1003,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         };
@@ -1049,8 +1031,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
 
@@ -1099,7 +1080,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setConfigParams(new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         });
@@ -1123,8 +1104,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
@@ -1164,7 +1144,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setConfigParams(new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         });
@@ -1188,8 +1168,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
 
@@ -1198,8 +1177,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         assertEquals(1, installSnapshot.getChunkIndex());
         assertEquals(3, installSnapshot.getTotalChunks());
-        assertEquals(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE,
-                installSnapshot.getLastChunkHashCode().getAsInt());
+        assertEquals(OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE),
+                installSnapshot.getLastChunkHashCode());
 
         final int hashCode = Arrays.hashCode(installSnapshot.getData());
 
@@ -1212,7 +1191,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         assertEquals(2, installSnapshot.getChunkIndex());
         assertEquals(3, installSnapshot.getTotalChunks());
-        assertEquals(hashCode, installSnapshot.getLastChunkHashCode().getAsInt());
+        assertEquals(OptionalInt.of(hashCode), installSnapshot.getLastChunkHashCode());
     }
 
     @Test
@@ -1282,8 +1261,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
     private MockRaftActorContext createActorContextWithFollower() {
         MockRaftActorContext actorContext = createActorContext();
-        actorContext.setPeerAddresses(ImmutableMap.<String, String>builder().put(FOLLOWER_ID,
-                followerActor.path().toString()).build());
+        actorContext.setPeerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString()));
         return actorContext;
     }
 
@@ -1292,7 +1270,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         DefaultConfigParamsImpl followerConfig = new DefaultConfigParamsImpl();
         followerConfig.setElectionTimeoutFactor(10000);
         followerActorContext.setConfigParams(followerConfig);
-        followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+        followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
         return followerActorContext;
     }
 
@@ -1358,7 +1336,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         final MockRaftActorContext leaderActorContext = createActorContext();
 
         MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
-        followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+        followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
 
         Follower follower = new Follower(followerActorContext);
         followerActor.underlyingActor().setBehavior(follower);
@@ -1739,7 +1717,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID);
 
         assertEquals(payloadVersion, leader.getLeaderPayloadVersion());
-        assertEquals(RaftVersions.HELIUM_VERSION, followerInfo.getRaftVersion());
+        assertEquals(RaftVersions.FLUORINE_VERSION, followerInfo.getRaftVersion());
 
         AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, true, 2, 1, payloadVersion);
 
@@ -1793,7 +1771,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MockRaftActorContext leaderActorContext = createActorContextWithFollower();
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(1000, TimeUnit.SECONDS));
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(2);
+        // Note: the size here depends on estimate
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(246);
 
         leaderActorContext.setReplicatedLog(
                 new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 4, 1).build());
@@ -2268,7 +2247,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         logStart("testReplicationWithPayloadSizeThatExceedsThreshold");
 
         final int serializedSize = SerializationUtils.serialize(new AppendEntries(1, LEADER_ID, -1, -1,
-                Arrays.asList(new SimpleReplicatedLogEntry(0, 1,
+                List.of(new SimpleReplicatedLogEntry(0, 1,
                         new MockRaftActorContext.MockPayload("large"))), 0, -1, (short)0)).length;
         final MockRaftActorContext.MockPayload largePayload =
                 new MockRaftActorContext.MockPayload("large", serializedSize);
@@ -2276,7 +2255,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MockRaftActorContext leaderActorContext = createActorContextWithFollower();
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(300, TimeUnit.MILLISECONDS));
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(serializedSize - 50);
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(serializedSize - 50);
         leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
         leaderActorContext.setCommitIndex(-1);
         leaderActorContext.setLastApplied(-1);
@@ -2360,7 +2339,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(100, TimeUnit.MILLISECONDS));
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setElectionTimeoutFactor(1);
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(10);
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(10);
         leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
         leaderActorContext.setCommitIndex(-1);
         leaderActorContext.setLastApplied(-1);
@@ -2375,7 +2354,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         sendReplicate(leaderActorContext, term, 0, new MockRaftActorContext.MockPayload("large",
-                leaderActorContext.getConfigParams().getSnapshotChunkSize() + 1));
+                leaderActorContext.getConfigParams().getMaximumMessageSliceSize() + 1));
         MessageCollectorActor.expectFirstMatching(followerActor, MessageSlice.class);
 
         // Sleep for at least 3 * election timeout so the slicing state expires.
@@ -2422,7 +2401,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         // Initial heartbeat shouldn't have the leader address
 
         AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertFalse(appendEntries.getLeaderAddress().isPresent());
+        assertNull(appendEntries.leaderAddress());
         MessageCollectorActor.clearMessages(followerActor);
 
         // Send AppendEntriesReply indicating the follower needs the leader address
@@ -2437,8 +2416,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
 
         appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertTrue(appendEntries.getLeaderAddress().isPresent());
-        assertEquals(leaderActor.path().toString(), appendEntries.getLeaderAddress().get());
+        assertEquals(leaderActor.path().toString(), appendEntries.leaderAddress());
         MessageCollectorActor.clearMessages(followerActor);
 
         // Send AppendEntriesReply indicating the follower does not need the leader address
@@ -2452,7 +2430,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
 
         appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertFalse(appendEntries.getLeaderAddress().isPresent());
+        assertNull(appendEntries.leaderAddress());
     }
 
     @Override
@@ -2462,14 +2440,14 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor());
     }
 
-    private class MockConfigParamsImpl extends DefaultConfigParamsImpl {
+    private static class MockConfigParamsImpl extends DefaultConfigParamsImpl {
 
         private final long electionTimeOutIntervalMillis;
-        private final int snapshotChunkSize;
+        private final int maximumMessageSliceSize;
 
-        MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int snapshotChunkSize) {
+        MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int maximumMessageSliceSize) {
             this.electionTimeOutIntervalMillis = electionTimeOutIntervalMillis;
-            this.snapshotChunkSize = snapshotChunkSize;
+            this.maximumMessageSliceSize = maximumMessageSliceSize;
         }
 
         @Override
@@ -2478,8 +2456,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         @Override
-        public int getSnapshotChunkSize() {
-            return snapshotChunkSize;
+        public int getMaximumMessageSliceSize() {
+            return maximumMessageSliceSize;
         }
     }
 }
index d8d2b4045bd9fa362cfd250927374d44bb3e88f7..2c83f67582f4b97c452fa0f1e0b6c3554c85faef 100644 (file)
@@ -16,10 +16,8 @@ import static org.mockito.Mockito.verify;
 import akka.protobuf.ByteString;
 import com.google.common.io.ByteSource;
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.Map;
 import java.util.OptionalInt;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
@@ -37,10 +35,11 @@ import org.slf4j.LoggerFactory;
 public class SnapshotTrackerTest {
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotTrackerTest.class);
 
+    private final HashMap<String, String> data = new HashMap<>();
+
     @Mock
     private RaftActorContext mockContext;
     private FileBackedOutputStream fbos;
-    private Map<String, String> data;
     private ByteString byteString;
     private byte[] chunk1;
     private byte[] chunk2;
@@ -48,12 +47,11 @@ public class SnapshotTrackerTest {
 
     @Before
     public void setup() {
-        data = new HashMap<>();
         data.put("key1", "value1");
         data.put("key2", "value2");
         data.put("key3", "value3");
 
-        byteString = ByteString.copyFrom(SerializationUtils.serialize((Serializable) data));
+        byteString = ByteString.copyFrom(SerializationUtils.serialize(data));
         chunk1 = getNextChunk(byteString, 0, 10);
         chunk2 = getNextChunk(byteString, 10, 10);
         chunk3 = getNextChunk(byteString, 20, byteString.size());
@@ -123,10 +121,8 @@ public class SnapshotTrackerTest {
         int start = offset;
         if (size > snapshotLength) {
             size = snapshotLength;
-        } else {
-            if (start + size > snapshotLength) {
-                size = snapshotLength - start;
-            }
+        } else if (start + size > snapshotLength) {
+            size = snapshotLength - start;
         }
 
         byte[] nextChunk = new byte[size];
index 81b9fbbb860bd98767d0283379ce849526b13f1f..4db399666f27725bc411736744a407e1d93273d7 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.client.messages;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,10 +19,11 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ShutdownTest {
-
     @Test
     public void test() {
-        Shutdown cloned = (Shutdown) SerializationUtils.clone(Shutdown.INSTANCE);
+        final var bytes = SerializationUtils.serialize(Shutdown.INSTANCE);
+        assertEquals(86, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("Cloned instance", Shutdown.INSTANCE, cloned);
     }
 }
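
The reworked ShutdownTest, like EmptyStateTest and NoopPayloadTest further below, serializes the singleton, pins the exact byte length, and then asserts that deserialization returns the very same instance. That assertSame can only hold if the class resolves back to its shared instance during deserialization, typically via readResolve or a serialization proxy. A generic illustration of the readResolve idiom follows; SingletonMessage is a made-up class, not taken from this patch.

    import java.io.Serializable;

    // Generic singleton-preserving serialization idiom; not copied from this patch.
    final class SingletonMessage implements Serializable {
        private static final long serialVersionUID = 1L;

        static final SingletonMessage INSTANCE = new SingletonMessage();

        private SingletonMessage() {
            // no state to initialize
        }

        private Object readResolve() {
            // Hand back the shared instance instead of the freshly deserialized copy.
            return INSTANCE;
        }
    }

Whether the actual Shutdown, EmptyState and NoopPayload classes use readResolve or a serialization proxy is not shown in this patch.
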
index 8452a71c24a1b14faf5525b9513eebf80bda1722..79c7477ba2d892c854ea0e9998417a47063506c5 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 
@@ -19,29 +19,14 @@ import org.opendaylight.controller.cluster.raft.RaftVersions;
  * @author Thomas Pantelis
  */
 public class AppendEntriesReplyTest {
-
     @Test
     public void testSerialization() {
-        AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
-                RaftVersions.CURRENT_VERSION);
-        AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+        final var expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
+            RaftVersions.CURRENT_VERSION);
 
-        assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
-        assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
-        assertEquals("getLogLastTerm", expected.getLogLastTerm(), cloned.getLogLastTerm());
-        assertEquals("getLogLastIndex", expected.getLogLastIndex(), cloned.getLogLastIndex());
-        assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
-        assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
-        assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
-        assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
-    }
-
-    @Test
-    @Deprecated
-    public void testPreFluorineSerialization() {
-        AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
-                RaftVersions.BORON_VERSION);
-        AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(98, bytes.length);
+        final var cloned = (AppendEntriesReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
@@ -50,6 +35,6 @@ public class AppendEntriesReplyTest {
         assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
         assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
         assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
-        assertEquals("isNeedsLeaderAddress", false, cloned.isNeedsLeaderAddress());
+        assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
     }
 }
index a7c3c8b9d5e97bde0e4b6fb431e105545eea754b..38f1defb9f420fdf7769fd2a5270feb9476b3544 100644 (file)
@@ -8,11 +8,10 @@
 package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
-import java.util.Arrays;
 import java.util.Iterator;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
@@ -25,7 +24,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt
  * @author Thomas Pantelis
  */
 public class AppendEntriesTest {
-
     @Test
     public void testSerialization() {
         ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
@@ -36,41 +34,29 @@ public class AppendEntriesTest {
 
         // Without leader address
 
-        AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.CURRENT_VERSION, null);
+        var expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+            RaftVersions.CURRENT_VERSION, null);
 
-        AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
+        var bytes = SerializationUtils.serialize(expected);
+        assertEquals(285, bytes.length);
+        var cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
 
         verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
 
         // With leader address
 
-        expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.CURRENT_VERSION, "leader address");
+        expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+            RaftVersions.CURRENT_VERSION, "leader address");
 
-        cloned = (AppendEntries) SerializationUtils.clone(expected);
+        bytes = SerializationUtils.serialize(expected);
+        assertEquals(301, bytes.length);
+        cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
 
         verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
     }
 
-    @Test
-    @Deprecated
-    public void testPreFluorineSerialization() {
-        ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
-
-        ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(3, 4, new MockPayload("payload2"));
-
-        short payloadVersion = 5;
-
-        AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.BORON_VERSION, "leader address");
-
-        AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
-
-        verifyAppendEntries(expected, cloned, RaftVersions.BORON_VERSION);
-    }
-
-    private static void verifyAppendEntries(AppendEntries expected, AppendEntries actual, short recipientRaftVersion) {
+    private static void verifyAppendEntries(final AppendEntries expected, final AppendEntries actual,
+            final short recipientRaftVersion) {
         assertEquals("getLeaderId", expected.getLeaderId(), actual.getLeaderId());
         assertEquals("getTerm", expected.getTerm(), actual.getTerm());
         assertEquals("getLeaderCommit", expected.getLeaderCommit(), actual.getLeaderCommit());
@@ -85,16 +71,11 @@ public class AppendEntriesTest {
             verifyReplicatedLogEntry(iter.next(), e);
         }
 
-        if (recipientRaftVersion >= RaftVersions.FLUORINE_VERSION) {
-            assertEquals("getLeaderAddress", expected.getLeaderAddress(), actual.getLeaderAddress());
-            assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
-        } else {
-            assertFalse(actual.getLeaderAddress().isPresent());
-            assertEquals("getLeaderRaftVersion", RaftVersions.BORON_VERSION, actual.getLeaderRaftVersion());
-        }
+        assertEquals("getLeaderAddress", expected.leaderAddress(), actual.leaderAddress());
+        assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
     }
 
-    private static void verifyReplicatedLogEntry(ReplicatedLogEntry expected, ReplicatedLogEntry actual) {
+    private static void verifyReplicatedLogEntry(final ReplicatedLogEntry expected, final ReplicatedLogEntry actual) {
         assertEquals("getIndex", expected.getIndex(), actual.getIndex());
         assertEquals("getTerm", expected.getTerm(), actual.getTerm());
         assertEquals("getData", expected.getData().toString(), actual.getData().toString());
index 2841d989cf68772f82bb15a1023c4c2a24372d82..9db4cf4d037907461ec0b92ba724f4b933d61c16 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class InstallSnapshotReplyTest {
-
     @Test
     public void testSerialization() {
-        InstallSnapshotReply expected = new InstallSnapshotReply(5L, "follower", 1, true);
-        InstallSnapshotReply cloned = (InstallSnapshotReply) SerializationUtils.clone(expected);
+        final var expected = new InstallSnapshotReply(5L, "follower", 1, true);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(95, bytes.length);
+        final var cloned = (InstallSnapshotReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
index c7fad2a19107e05332a873c5c2bad172854be3ab..090ab77dad7275cf68848bf80affc2dcafaa21fe 100644 (file)
@@ -10,11 +10,10 @@ package org.opendaylight.controller.cluster.raft.messages;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
-import java.io.Serializable;
-import java.util.Arrays;
+import java.util.List;
 import java.util.Optional;
 import java.util.OptionalInt;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
@@ -26,9 +25,17 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
  * @author Thomas Pantelis
  */
 public class InstallSnapshotTest {
+    @Test
+    public void testCurrentSerialization() {
+        testSerialization(RaftVersions.CURRENT_VERSION, 1262, 1125);
+    }
 
     @Test
-    public void testSerialization() {
+    public void testFluorineSerialization() {
+        testSerialization(RaftVersions.FLUORINE_VERSION, 1302, 1165);
+    }
+
+    private static void testSerialization(final short raftVersion, final int fullSize, final int emptySize) {
         byte[] data = new byte[1000];
         for (int i = 0, j = 0; i < data.length; i++) {
             data[i] = (byte)j;
@@ -37,21 +44,19 @@ public class InstallSnapshotTest {
             }
         }
 
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        var serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("leader", true), new ServerInfo("follower", false)));
-        InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321),
-            Optional.of(serverConfig));
-
-        Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION);
-        assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass());
+        assertInstallSnapshot(fullSize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321),
+            Optional.of(serverConfig), raftVersion));
 
-        InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized);
-        verifyInstallSnapshot(expected, actual);
+        assertInstallSnapshot(emptySize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.empty(),
+            Optional.empty(), raftVersion));
+    }
 
-        expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6);
-        actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable(
-                RaftVersions.CURRENT_VERSION));
-        verifyInstallSnapshot(expected, actual);
+    private static void assertInstallSnapshot(final int expectedSize, final InstallSnapshot expected) {
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(expectedSize, bytes.length);
+        verifyInstallSnapshot(expected, (InstallSnapshot) SerializationUtils.deserialize(bytes));
     }
 
     private static void verifyInstallSnapshot(final InstallSnapshot expected, final InstallSnapshot actual) {
@@ -74,8 +79,8 @@ public class InstallSnapshotTest {
         assertEquals("getServerConfig present", expected.getServerConfig().isPresent(),
                 actual.getServerConfig().isPresent());
         if (expected.getServerConfig().isPresent()) {
-            assertEquals("getServerConfig", expected.getServerConfig().get().getServerConfig(),
-                    actual.getServerConfig().get().getServerConfig());
+            assertEquals("getServerConfig", expected.getServerConfig().orElseThrow().getServerConfig(),
+                    actual.getServerConfig().orElseThrow().getServerConfig());
         }
     }
 }
index fa1bb5f15277d97cab4268bb3da6c0ac92e90feb..51488a362ce411e4cb676667efe9d63b6e1b0ae2 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class RequestVoteReplyTest {
-
     @Test
     public void testSerialization() {
-        RequestVoteReply expected = new RequestVoteReply(5, true);
-        RequestVoteReply cloned = (RequestVoteReply) SerializationUtils.clone(expected);
+        final var expected = new RequestVoteReply(5, true);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(78, bytes.length);
+        final var cloned = (RequestVoteReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("isVoteGranted", expected.isVoteGranted(), cloned.isVoteGranted());
index 6cb9179dedd3f4af8940dfcb1f6c615a89456700..c3227be60c7955c7336b99b22b7f3ef4f6762949 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class RequestVoteTest {
-
     @Test
     public void testSerialization() {
-        RequestVote expected = new RequestVote(4, "candidateId", 3, 2);
-        RequestVote cloned = (RequestVote) SerializationUtils.clone(expected);
+        final var expected = new RequestVote(4, "candidateId", 3, 2);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(97, bytes.length);
+        final var cloned = (RequestVote) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getCandidateId", expected.getCandidateId(), cloned.getCandidateId());
index b7f152574bcbb478bf7951b1ef8f64aea12198db..c762c1e2d07c3cb0126232df9b26582db7318f70 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ApplyJournalEntriesTest {
-
     @Test
     public void testSerialization() {
-        ApplyJournalEntries expected = new ApplyJournalEntries(5);
-        ApplyJournalEntries cloned = (ApplyJournalEntries) SerializationUtils.clone(expected);
+        final var expected = new ApplyJournalEntries(5);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(80, bytes.length);
+        final var cloned = (ApplyJournalEntries) SerializationUtils.deserialize(bytes);
 
         assertEquals("getFromIndex", expected.getToIndex(), cloned.getToIndex());
     }
index 8334296ead188fd2263e855aeaeb164f36af839b..73fb02f2bcd3b542fb8e2154a956a190512b39f3 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class DeleteEntriesTest {
-
     @Test
     public void testSerialization() {
-        DeleteEntries expected = new DeleteEntries(5);
-        DeleteEntries cloned = (DeleteEntries) SerializationUtils.clone(expected);
+        final var expected = new DeleteEntries(5);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(79, bytes.length);
+        final var cloned = (DeleteEntries) SerializationUtils.deserialize(bytes);
 
         assertEquals("getFromIndex", expected.getFromIndex(), cloned.getFromIndex());
     }
index 963580cde4b04828cbe08b90bc70cebbaf4fd4ba..18fa2d7719b64a41d221aeabc8e2a76fa7f7987e 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -19,10 +20,11 @@ import org.junit.Test;
  *
  */
 public class EmptyStateTest {
-
     @Test
     public void testSerialization() {
-        EmptyState cloned = (EmptyState) SerializationUtils.clone(EmptyState.INSTANCE);
+        final var bytes = SerializationUtils.serialize(EmptyState.INSTANCE);
+        assertEquals(82, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("cloned", EmptyState.INSTANCE, cloned);
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java
new file mode 100644 (file)
index 0000000..bf2e8fa
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Test;
+
+public class NoopPayloadTest {
+    @Test
+    public void testSerialization() {
+        final var bytes = SerializationUtils.serialize(NoopPayload.INSTANCE);
+        assertEquals(74, bytes.length);
+        assertSame(NoopPayload.INSTANCE, SerializationUtils.deserialize(bytes));
+    }
+}
index aa2fe90884540760fcd0dbd99b3f754dfa3f9412..d686e946e72064771390ce96021afa3bc632259e 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft.persisted;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.util.Arrays;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -20,19 +20,21 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ServerConfigurationPayloadTest {
-
     @Test
     public void testSerialization() {
-        ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true),
-                new ServerInfo("2", false)));
-        ServerConfigurationPayload cloned = (ServerConfigurationPayload) SerializationUtils.clone(expected);
+        final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true),
+            new ServerInfo("2", false)));
+
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(125, bytes.length);
+        final var cloned = (ServerConfigurationPayload) SerializationUtils.deserialize(bytes);
 
         assertEquals("getServerConfig", expected.getServerConfig(), cloned.getServerConfig());
     }
 
     @Test
     public void testSize() {
-        ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true)));
+        final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true)));
         assertTrue(expected.size() > 0);
     }
 }
index ec4a3689b2a4a109bb2c28d81aecbab214fe225f..919aaba4cd6a4d10c50fab4962bdd83d8f2e8055 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
 
@@ -19,12 +19,12 @@ import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
  * @author Thomas Pantelis
  */
 public class SimpleReplicatedLogEntryTest {
-
     @Test
     public void testSerialization() {
-        SimpleReplicatedLogEntry expected = new SimpleReplicatedLogEntry(0, 1,
-                new MockRaftActorContext.MockPayload("A"));
-        SimpleReplicatedLogEntry cloned = (SimpleReplicatedLogEntry) SerializationUtils.clone(expected);
+        final var expected = new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("A"));
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(218, bytes.length);
+        final var cloned = (SimpleReplicatedLogEntry) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getIndex", expected.getIndex(), cloned.getIndex());
index 9f1f924252da9a9fd624973944f47ac42bb573b7..3223e482d6e75eb226a95af5049851a8e0e585ad 100644 (file)
@@ -9,10 +9,8 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
@@ -23,27 +21,29 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
  * @author Thomas Pantelis
  */
 public class SnapshotTest {
-
     @Test
     public void testSerialization() {
-        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, Arrays.asList(
-                new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))));
-        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, Collections.emptyList());
+        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, List.of(
+                new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))), 491);
+        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, List.of(), 345);
     }
 
-    private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied) {
+    private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied,
+            final int expectedSize) {
         long lastIndex = 6;
         long lastTerm = 2;
         long lastAppliedIndex = 5;
         long lastAppliedTerm = 1;
         long electionTerm = 3;
         String electionVotedFor = "member-1";
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("1", true), new ServerInfo("2", false)));
 
-        Snapshot expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
+        final var expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
                 lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
-        Snapshot cloned = (Snapshot) SerializationUtils.clone(expected);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(expectedSize, bytes.length);
+        final var cloned = (Snapshot) SerializationUtils.deserialize(bytes);
 
         assertEquals("lastIndex", expected.getLastIndex(), cloned.getLastIndex());
         assertEquals("lastTerm", expected.getLastTerm(), cloned.getLastTerm());
index de95125966405f1fb3af8d4357cae6218afb2032..75e32783b9b37652c217cbadd67d304bf82efaf0 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class UpdateElectionTermTest {
-
     @Test
     public void testSerialization() {
-        UpdateElectionTerm expected = new UpdateElectionTerm(5, "leader");
-        UpdateElectionTerm cloned = (UpdateElectionTerm) SerializationUtils.clone(expected);
+        final var expected = new UpdateElectionTerm(5, "leader");
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(88, bytes.length);
+        final var cloned = (UpdateElectionTerm) SerializationUtils.deserialize(bytes);
 
         assertEquals("getCurrentTerm", expected.getCurrentTerm(), cloned.getCurrentTerm());
         assertEquals("getVotedFor", expected.getVotedFor(), cloned.getVotedFor());
index a9eea07387b720f8f25080d4ab069efdef125a72..21bf4bfa57acc13c7572a974ce5dc84963152bbb 100644 (file)
@@ -24,7 +24,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.Option;
@@ -43,7 +43,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
         final Class<?> ofType;
 
         WriteMessagesComplete(final int count, final Class<?> ofType) {
-            this.latch = new CountDownLatch(count);
+            latch = new CountDownLatch(count);
             this.ofType = ofType;
         }
     }
index 3b80172aba6b2eeb3648b44e7e389c008f5ba853..243f3b0bfc38d72eb3980b4608041d6b5dfcf45c 100644 (file)
@@ -12,7 +12,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
@@ -20,82 +20,47 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     <packaging>bundle</packaging>
 
     <dependencies>
-        <!-- Akka -->
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>repackaged-akka</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.13</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-persistence-tck_2.13</artifactId>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
         </dependency>
-
-        <!-- Codahale -->
         <dependency>
             <groupId>io.dropwizard.metrics</groupId>
             <artifactId>metrics-core</artifactId>
         </dependency>
-
-        <!-- Scala -->
         <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-library</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
-
-        <!-- Clustering commons for metrics -->
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-clustering-commons</artifactId>
-        </dependency>
-
-        <!-- Atomix -->
-        <dependency>
-            <groupId>io.atomix</groupId>
             <artifactId>atomix-storage</artifactId>
-            <version>3.1.5</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>io.atomix</groupId>
-            <artifactId>atomix-utils</artifactId>
-            <version>3.1.5</version>
-            <scope>provided</scope>
         </dependency>
         <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>kryo</artifactId>
-            <version>4.0.2</version>
-            <scope>provided</scope>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>minlog</artifactId>
-            <version>1.3.1</version>
-            <scope>provided</scope>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>reflectasm</artifactId>
-            <version>1.11.8</version>
-            <scope>provided</scope>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
         </dependency>
+
         <dependency>
-            <groupId>org.ow2.asm</groupId>
-            <artifactId>asm</artifactId>
-            <version>5.2</version>
-            <scope>provided</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.objenesis</groupId>
-            <artifactId>objenesis</artifactId>
-            <version>2.6</version>
-            <scope>provided</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-persistence-tck_2.13</artifactId>
         </dependency>
-
         <dependency>
             <groupId>commons-io</groupId>
             <artifactId>commons-io</artifactId>
@@ -108,34 +73,6 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         </dependency>
     </dependencies>
 
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <extensions>true</extensions>
-                <configuration>
-                    <instructions>
-                        <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
-                        <Import-Package>
-                            !COM.newmonics.*,
-                            !android.os,
-                            *
-                        </Import-Package>
-                        <Embed-Dependency>
-                            <!-- atomix.io is using an older Guava, and Kryo is using ancient objenesis,
-                                 so let's embed it to prevent duplicates -->
-                            *;inline=true;groupId=io.atomix,
-                            *;inline=true;groupId=com.esotericsoftware,
-                            *;inline=true;groupId=org.objenesis,
-                            *;inline=true;groupId=org.ow2.asm,
-                        </Embed-Dependency>
-                    </instructions>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
     <scm>
         <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
         <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
index 678749b1c16cf9a4ddd9d0a052d763b70541ab19..b89ebf4eb131b2c1b458781c98954f06bf900b09 100644 (file)
@@ -13,6 +13,7 @@ import com.codahale.metrics.Histogram;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
 
 /**
  * Abstraction of a data journal. This provides a unified interface towards {@link SegmentedJournalActor}, allowing
@@ -79,7 +80,13 @@ abstract class DataJournal {
     /**
      * Handle a request to store some messages.
      *
-     * @param message Request message
+     * @param message {@link WriteMessages} message
+     * @return a {@link WrittenMessages} object
+     */
+    abstract @NonNull WrittenMessages handleWriteMessages(@NonNull WriteMessages message);
+
+    /**
+     * Flush all messages to durable storage.
      */
-    abstract void handleWriteMessages(@NonNull WriteMessages message);
+    abstract void flush();
 }
index 6899c6e1d652d518dd49ec15e1692ac409b51661..fdd0b80d03c7a8ae24afac26b655e092a3afb1c4 100644 (file)
@@ -10,16 +10,13 @@ package org.opendaylight.controller.akka.segjournal;
 import static java.util.Objects.requireNonNull;
 
 import akka.persistence.PersistentRepr;
-import io.atomix.storage.journal.JournalSegment;
 
 /**
  * A single entry in the data journal. We do not store {@code persistenceId} for each entry, as that is a
- * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by {@link JournalSegment}'s
- * index.
- *
- * @author Robert Varga
+ * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by a particular journal
+ * segment's index.
  */
-abstract class DataJournalEntry {
+abstract sealed class DataJournalEntry {
     /**
      * A single data journal entry on its way to the backing file.
      */
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java
new file mode 100644 (file)
index 0000000..e0d7be1
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorSystem;
+import akka.actor.ExtendedActorSystem;
+import akka.persistence.PersistentRepr;
+import akka.serialization.JavaSerializer;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+
+/**
+ * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
+ * a nested JavaSerializer to handle the payload.
+ *
+ * <p>
+ * Since we are persisting only parts of {@link PersistentRepr}, this class is asymmetric by design:
+ * {@link #write(EntryOutput, DataJournalEntry)} only accepts the {@link ToPersistence} subclass, which is a wrapper
+ * around a {@link PersistentRepr}, while {@link #read(EntryInput)} produces a {@link FromPersistence}, which
+ * needs further processing to reconstruct a {@link PersistentRepr}.
+ */
+final class DataJournalEntrySerdes implements EntrySerdes<DataJournalEntry> {
+    private final ExtendedActorSystem actorSystem;
+
+    DataJournalEntrySerdes(final ActorSystem actorSystem) {
+        this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
+    }
+
+    @Override
+    public void write(final EntryOutput output, final DataJournalEntry entry) throws IOException {
+        if (entry instanceof ToPersistence toPersistence) {
+            final var repr = toPersistence.repr();
+            output.writeString(repr.manifest());
+            output.writeString(repr.writerUuid());
+            output.writeObject(repr.payload());
+        } else {
+            throw new VerifyException("Unexpected entry " + entry);
+        }
+    }
+
+    @Override
+    public DataJournalEntry read(final EntryInput input) throws IOException {
+        return new FromPersistence(input.readString(), input.readString(),
+            JavaSerializer.currentSystem().withValue(actorSystem, (Callable<Object>) input::readObject));
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java
deleted file mode 100644 (file)
index e248262..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.akka.segjournal;
-
-import static com.google.common.base.Verify.verify;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSystem;
-import akka.actor.ExtendedActorSystem;
-import akka.persistence.PersistentRepr;
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.Serializer;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
-import com.esotericsoftware.kryo.serializers.JavaSerializer;
-import java.util.concurrent.Callable;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
-
-/**
- * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
- * a nested JavaSerializer to handle the payload.
- *
- * <p>
- * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design:
- * {@link #write(Kryo, Output, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper
- * around a {@link PersistentRepr}, while {@link #read(Kryo, Input, Class)} produces an {@link FromPersistence}, which
- * needs further processing to reconstruct a {@link PersistentRepr}.
- *
- * @author Robert Varga
- */
-final class DataJournalEntrySerializer extends Serializer<DataJournalEntry> {
-    private final JavaSerializer serializer = new JavaSerializer();
-    private final ExtendedActorSystem actorSystem;
-
-    DataJournalEntrySerializer(final ActorSystem actorSystem) {
-        this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
-    }
-
-    @Override
-    public void write(final Kryo kryo, final Output output, final DataJournalEntry object) {
-        verify(object instanceof ToPersistence);
-        final PersistentRepr repr = ((ToPersistence) object).repr();
-        output.writeString(repr.manifest());
-        output.writeString(repr.writerUuid());
-        serializer.write(kryo, output, repr.payload());
-    }
-
-    @Override
-    public DataJournalEntry read(final Kryo kryo, final Input input, final Class<DataJournalEntry> type) {
-        final String manifest = input.readString();
-        final String uuid = input.readString();
-        final Object payload = akka.serialization.JavaSerializer.currentSystem().withValue(actorSystem,
-            (Callable<Object>)() -> serializer.read(kryo, input, type));
-        return new FromPersistence(manifest, uuid, payload);
-    }
-}
index bc5eead800c938ab5bde4eb55d5dc732da234776..243a064b80fea81b9fed2a522dacb9f0105aabc4 100644 (file)
@@ -7,32 +7,30 @@
  */
 package org.opendaylight.controller.akka.segjournal;
 
-import static com.google.common.base.Verify.verify;
-
 import akka.actor.ActorSystem;
-import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import com.codahale.metrics.Histogram;
-import io.atomix.storage.StorageLevel;
-import io.atomix.storage.journal.Indexed;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalReader;
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalWriter;
 import io.atomix.storage.journal.SegmentedJournal;
-import io.atomix.storage.journal.SegmentedJournalReader;
-import io.atomix.storage.journal.SegmentedJournalWriter;
-import io.atomix.utils.serializer.Namespace;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
 import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
 import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
 import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.jdk.javaapi.CollectionConverters;
 
 /**
  * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
- *
- * @author Robert Varga
  */
 final class DataJournalV0 extends DataJournal {
     private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);
@@ -44,8 +42,8 @@ final class DataJournalV0 extends DataJournal {
         super(persistenceId, messageSize);
         entries = SegmentedJournal.<DataJournalEntry>builder()
                 .withStorageLevel(storage).withDirectory(directory).withName("data")
-                .withNamespace(Namespace.builder()
-                    .register(new DataJournalEntrySerializer(system), FromPersistence.class, ToPersistence.class)
+                .withNamespace(JournalSerdes.builder()
+                    .register(new DataJournalEntrySerdes(system), FromPersistence.class, ToPersistence.class)
                     .build())
                 .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
                 .build();
@@ -68,31 +66,20 @@ final class DataJournalV0 extends DataJournal {
 
     @Override
     void close() {
+        flush();
         entries.close();
     }
 
+    @Override
+    void flush() {
+        entries.writer().flush();
+    }
+
     @Override
     @SuppressWarnings("checkstyle:illegalCatch")
     void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
-        try (SegmentedJournalReader<DataJournalEntry> reader = entries.openReader(fromSequenceNr)) {
-            int count = 0;
-            while (reader.hasNext() && count < message.max) {
-                final Indexed<DataJournalEntry> next = reader.next();
-                if (next.index() > message.toSequenceNr) {
-                    break;
-                }
-
-                LOG.trace("{}: replay {}", persistenceId, next);
-                updateLargestSize(next.size());
-                final DataJournalEntry entry = next.entry();
-                verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);
-
-                final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
-                LOG.debug("{}: replaying {}", persistenceId, repr);
-                message.replayCallback.accept(repr);
-                count++;
-            }
-            LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
+        try (var reader = entries.openReader(fromSequenceNr)) {
+            handleReplayMessages(reader, message);
         } catch (Exception e) {
             LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
             message.promise.failure(e);
@@ -101,34 +88,73 @@ final class DataJournalV0 extends DataJournal {
         }
     }
 
+    private void handleReplayMessages(final JournalReader<DataJournalEntry> reader, final ReplayMessages message) {
+        int count = 0;
+        while (count < message.max) {
+            final var next = reader.tryNext();
+            if (next == null || next.index() > message.toSequenceNr) {
+                break;
+            }
+
+            LOG.trace("{}: replay {}", persistenceId, next);
+            updateLargestSize(next.size());
+            final var entry = next.entry();
+            if (entry instanceof FromPersistence fromPersistence) {
+                final var repr = fromPersistence.toRepr(persistenceId, next.index());
+                LOG.debug("{}: replaying {}", persistenceId, repr);
+                message.replayCallback.accept(repr);
+                count++;
+            } else {
+                throw new VerifyException("Unexpected entry " + entry);
+            }
+        }
+        LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
+    }
+
     @Override
     @SuppressWarnings("checkstyle:illegalCatch")
-    void handleWriteMessages(final WriteMessages message) {
+    WrittenMessages handleWriteMessages(final WriteMessages message) {
         final int count = message.size();
-        final SegmentedJournalWriter<DataJournalEntry> writer = entries.writer();
+        final var responses = new ArrayList<>();
+        final var writer = entries.writer();
+        long writtenBytes = 0;
 
         for (int i = 0; i < count; ++i) {
             final long mark = writer.getLastIndex();
-            final AtomicWrite request = message.getRequest(i);
+            final var request = message.getRequest(i);
+
+            final var reprs = CollectionConverters.asJava(request.payload());
+            LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark);
             try {
-                for (PersistentRepr repr : CollectionConverters.asJava(request.payload())) {
-                    final Object payload = repr.payload();
-                    if (!(payload instanceof Serializable)) {
-                        throw new UnsupportedOperationException("Non-serializable payload encountered "
-                                + payload.getClass());
-                    }
-
-                    recordMessageSize(writer.append(new ToPersistence(repr)).size());
-                }
+                writtenBytes += writePayload(writer, reprs);
             } catch (Exception e) {
-                LOG.warn("{}: failed to write out request", persistenceId, e);
-                message.setFailure(i, e);
+                LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e);
+                responses.add(e);
                 writer.truncate(mark);
                 continue;
             }
+            responses.add(null);
+        }
+
+        return new WrittenMessages(message, responses, writtenBytes);
+    }
+
+    private long writePayload(final JournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
+        long bytes = 0;
+        for (var repr : reprs) {
+            final Object payload = repr.payload();
+            if (!(payload instanceof Serializable)) {
+                throw new UnsupportedOperationException("Non-serializable payload encountered "
+                        + payload.getClass());
+            }
 
-            message.setSuccess(i);
+            LOG.trace("{}: starting append of {}", persistenceId, payload);
+            final var entry = writer.append(new ToPersistence(repr));
+            final int size = entry.size();
+            LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
+            recordMessageSize(size);
+            bytes += size;
         }
-        writer.flush();
+        return bytes;
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java
new file mode 100644 (file)
index 0000000..eebf95f
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+enum LongEntrySerdes implements EntrySerdes<Long> {
+    LONG_ENTRY_SERDES {
+        @Override
+        public Long read(final EntryInput input) throws IOException {
+            return input.readLong();
+        }
+
+        @Override
+        public void write(final EntryOutput output, final Long entry) throws IOException {
+            output.writeLong(entry);
+        }
+    }
+}
index 8efb2db3ab5f3878c62c0934cdcb17732bc2b387..b9320998c95f28b7fcbd8eb170bf1842d3b83a47 100644 (file)
@@ -17,15 +17,13 @@ import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import akka.persistence.journal.japi.AsyncWriteJournal;
 import com.typesafe.config.Config;
-import com.typesafe.config.ConfigMemorySize;
-import io.atomix.storage.StorageLevel;
 import io.atomix.storage.journal.SegmentedJournal;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
 import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Consumer;
@@ -39,8 +37,6 @@ import scala.concurrent.Future;
  * An Akka persistence journal implementation on top of {@link SegmentedJournal}. This actor represents aggregation
  * of multiple journals and performs a receptionist job between Akka and invidual per-persistenceId actors. See
  * {@link SegmentedJournalActor} for details on how the persistence works.
- *
- * @author Robert Varga
  */
 public class SegmentedFileJournal extends AsyncWriteJournal {
     public static final String STORAGE_ROOT_DIRECTORY = "root-directory";
@@ -48,6 +44,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     public static final int STORAGE_MAX_ENTRY_SIZE_DEFAULT = 16 * 1024 * 1024;
     public static final String STORAGE_MAX_SEGMENT_SIZE = "max-segment-size";
     public static final int STORAGE_MAX_SEGMENT_SIZE_DEFAULT = STORAGE_MAX_ENTRY_SIZE_DEFAULT * 8;
+    public static final String STORAGE_MAX_UNFLUSHED_BYTES = "max-unflushed-bytes";
     public static final String STORAGE_MEMORY_MAPPED = "memory-mapped";
 
     private static final Logger LOG = LoggerFactory.getLogger(SegmentedFileJournal.class);
@@ -57,6 +54,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     private final StorageLevel storage;
     private final int maxEntrySize;
     private final int maxSegmentSize;
+    private final int maxUnflushedBytes;
 
     public SegmentedFileJournal(final Config config) {
         rootDir = new File(config.getString(STORAGE_ROOT_DIRECTORY));
@@ -68,6 +66,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
 
         maxEntrySize = getBytes(config, STORAGE_MAX_ENTRY_SIZE, STORAGE_MAX_ENTRY_SIZE_DEFAULT);
         maxSegmentSize = getBytes(config, STORAGE_MAX_SEGMENT_SIZE, STORAGE_MAX_SEGMENT_SIZE_DEFAULT);
+        maxUnflushedBytes = getBytes(config, STORAGE_MAX_UNFLUSHED_BYTES, maxEntrySize);
 
         if (config.hasPath(STORAGE_MEMORY_MAPPED)) {
             storage = config.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK;
@@ -80,12 +79,12 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
 
     @Override
     public Future<Iterable<Optional<Exception>>> doAsyncWriteMessages(final Iterable<AtomicWrite> messages) {
-        final Map<ActorRef, WriteMessages> map = new HashMap<>();
-        final List<Future<Optional<Exception>>> result = new ArrayList<>();
+        final var map = new HashMap<ActorRef, WriteMessages>();
+        final var result = new ArrayList<Future<Optional<Exception>>>();
 
-        for (AtomicWrite message : messages) {
-            final String persistenceId = message.persistenceId();
-            final ActorRef handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
+        for (var message : messages) {
+            final var persistenceId = message.persistenceId();
+            final var handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
             result.add(map.computeIfAbsent(handler, key -> new WriteMessages()).add(message));
         }
 
@@ -116,18 +115,18 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     }
 
     private ActorRef createHandler(final String persistenceId) {
-        final String directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8);
-        final File directory = new File(rootDir, directoryName);
+        final var directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8);
+        final var directory = new File(rootDir, directoryName);
         LOG.debug("Creating handler for {} in directory {}", persistenceId, directory);
 
-        final ActorRef handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
-            maxEntrySize, maxSegmentSize));
+        final var handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
+            maxEntrySize, maxSegmentSize, maxUnflushedBytes));
         LOG.debug("Directory {} handled by {}", directory, handler);
         return handler;
     }
 
     private <T> Future<T> delegateMessage(final String persistenceId, final AsyncMessage<T> message) {
-        final ActorRef handler = handlers.get(persistenceId);
+        final var handler = handlers.get(persistenceId);
         if (handler == null) {
             return Futures.failed(new IllegalStateException("Cannot find handler for " + persistenceId));
         }
@@ -145,9 +144,8 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
         if (!config.hasPath(path)) {
             return defaultValue;
         }
-        final ConfigMemorySize value = config.getMemorySize(path);
-        final long result = value.toBytes();
-        checkArgument(result <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE);
-        return (int) result;
+        final long value = config.getBytes(path);
+        checkArgument(value <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", value, Integer.MAX_VALUE);
+        return (int) value;
     }
 }
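
Editor's note: the rewritten getBytes() helper above resolves size settings through Typesafe Config's own byte parsing instead of ConfigMemorySize. The following is a minimal sketch, assuming only the com.typesafe:config artifact on the classpath, of how a setting such as the new max-unflushed-bytes key resolves to a byte count; the class name and the literal "1M" value are illustrative and not part of this change.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

// Illustrative only: mirrors how the journal's getBytes() helper resolves size settings
// via Config.getBytes(), which understands HOCON size strings such as "1M" or "16M".
public final class UnflushedBytesExample {
    public static void main(final String[] args) {
        final Config config = ConfigFactory.parseString("max-unflushed-bytes = 1M");
        // getBytes() returns the setting as a number of bytes; the journal additionally
        // caps the value at Integer.MAX_VALUE before using it.
        final long bytes = config.getBytes("max-unflushed-bytes");
        System.out.println("max-unflushed-bytes resolves to " + bytes + " bytes");
    }
}
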
index e5c5b7807b0b9ea016cdc4aa0835f49daf5ff8dd..9f63892d26fdfd949b46d0bb9213bd84df2deb80 100644 (file)
@@ -7,11 +7,14 @@
  */
 package org.opendaylight.controller.akka.segjournal;
 
+import static com.google.common.base.Verify.verify;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.AbstractActor;
+import akka.actor.ActorRef;
 import akka.actor.Props;
+import akka.japi.pf.ReceiveBuilder;
 import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import com.codahale.metrics.Histogram;
@@ -19,12 +22,13 @@ import com.codahale.metrics.Meter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.google.common.base.MoreObjects;
-import io.atomix.storage.StorageLevel;
+import com.google.common.base.Stopwatch;
 import io.atomix.storage.journal.Indexed;
+import io.atomix.storage.journal.JournalSerdes;
 import io.atomix.storage.journal.SegmentedJournal;
-import io.atomix.storage.journal.SegmentedJournalWriter;
-import io.atomix.utils.serializer.Namespace;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
@@ -56,11 +60,9 @@ import scala.concurrent.Promise;
  * <p>
  * Split-file approach allows us to treat sequence numbers and indices as equivalent, without maintaining any explicit
  * mapping information. The only additional information we need to maintain is the last deleted sequence number.
- *
- * @author Robert Varga
  */
-final class SegmentedJournalActor extends AbstractActor {
-    abstract static class AsyncMessage<T> {
+abstract sealed class SegmentedJournalActor extends AbstractActor {
+    abstract static sealed class AsyncMessage<T> {
         final Promise<T> promise = Promise.apply();
     }
 
@@ -103,7 +105,7 @@ final class SegmentedJournalActor extends AbstractActor {
         private final List<Promise<Optional<Exception>>> results = new ArrayList<>();
 
         Future<Optional<Exception>> add(final AtomicWrite write) {
-            final Promise<Optional<Exception>> promise = Promise.apply();
+            final var promise = Promise.<Optional<Exception>>apply();
             requests.add(write);
             results.add(promise);
             return promise.future();
@@ -145,8 +147,138 @@ final class SegmentedJournalActor extends AbstractActor {
         }
     }
 
+    // responses == null on success, Exception on failure
+    record WrittenMessages(WriteMessages message, List<Object> responses, long writtenBytes) {
+        WrittenMessages {
+            verify(responses.size() == message.size(), "Mismatched %s and %s", message, responses);
+            verify(writtenBytes >= 0, "Unexpected length %s", writtenBytes);
+        }
+
+        private void complete() {
+            for (int i = 0, size = responses.size(); i < size; ++i) {
+                if (responses.get(i) instanceof Exception ex) {
+                    message.setFailure(i, ex);
+                } else {
+                    message.setSuccess(i);
+                }
+            }
+        }
+    }
+
+    /**
+     * A {@link SegmentedJournalActor} which delays issuing a flush operation until a watermark is reached or when the
+     * queue is empty.
+     *
+     * <p>
+     * The problem we are addressing is that there is a queue sitting in front of the actor, which we have no direct
+     * access to. Since a flush involves committing data to durable storage, that operation can easily end up dominating
+     * the workload.
+     *
+     * <p>
+     * We solve this by having an additional queue in which we track which messages were written and trigger a flush
+     * only when the number of bytes we have written exceeds a specified limit. The other part is that each time this
+     * queue becomes non-empty, we send a dedicated message to self. This acts as an actor queue probe -- when we receive
+     * it, we know we have processed all messages that were in the queue when we first delayed the write.
+     *
+     * <p>
+     * The combination of these mechanisms ensures we use a minimal delay while also taking advantage of
+     * batching opportunities.
+     */
+    private static final class Delayed extends SegmentedJournalActor {
+        private static final class Flush extends AsyncMessage<Void> {
+            final long batch;
+
+            Flush(final long batch) {
+                this.batch = batch;
+            }
+        }
+
+        private final ArrayDeque<WrittenMessages> unflushedWrites = new ArrayDeque<>();
+        private final Stopwatch unflushedDuration = Stopwatch.createUnstarted();
+        private final long maxUnflushedBytes;
+
+        private long batch = 0;
+        private long unflushedBytes = 0;
+
+        Delayed(final String persistenceId, final File directory, final StorageLevel storage,
+                final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+            super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+            this.maxUnflushedBytes = maxUnflushedBytes;
+        }
+
+        @Override
+        ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+            return super.addMessages(builder).match(Flush.class, this::handleFlush);
+        }
+
+        private void handleFlush(final Flush message) {
+            if (message.batch == batch) {
+                flushWrites();
+            } else {
+                LOG.debug("{}: batch {} not flushed by {}", persistenceId(), batch, message.batch);
+            }
+        }
+
+        @Override
+        void onWrittenMessages(final WrittenMessages message) {
+            boolean first = unflushedWrites.isEmpty();
+            if (first) {
+                unflushedDuration.start();
+            }
+            unflushedWrites.addLast(message);
+            unflushedBytes = unflushedBytes + message.writtenBytes;
+            if (unflushedBytes >= maxUnflushedBytes) {
+                LOG.debug("{}: reached {} unflushed journal bytes", persistenceId(), unflushedBytes);
+                flushWrites();
+            } else if (first) {
+                LOG.debug("{}: deferring journal flush", persistenceId());
+                self().tell(new Flush(++batch), ActorRef.noSender());
+            }
+        }
+
+        @Override
+        void flushWrites() {
+            final var unsyncedSize = unflushedWrites.size();
+            if (unsyncedSize == 0) {
+                // Nothing to flush
+                return;
+            }
+
+            LOG.debug("{}: flushing {} journal writes after {}", persistenceId(), unsyncedSize,
+                unflushedDuration.stop());
+            flushJournal(unflushedBytes, unsyncedSize);
+
+            final var sw = Stopwatch.createStarted();
+            unflushedWrites.forEach(WrittenMessages::complete);
+            unflushedWrites.clear();
+            unflushedBytes = 0;
+            unflushedDuration.reset();
+            LOG.debug("{}: completed {} flushed journal writes in {}", persistenceId(), unsyncedSize, sw);
+        }
+    }
+
+    private static final class Immediate extends SegmentedJournalActor {
+        Immediate(final String persistenceId, final File directory, final StorageLevel storage,
+                final int maxEntrySize, final int maxSegmentSize) {
+            super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+        }
+
+        @Override
+        void onWrittenMessages(final WrittenMessages message) {
+            flushJournal(message.writtenBytes, 1);
+            message.complete();
+        }
+
+        @Override
+        void flushWrites() {
+            // No-op
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournalActor.class);
-    private static final Namespace DELETE_NAMESPACE = Namespace.builder().register(Long.class).build();
+    private static final JournalSerdes DELETE_NAMESPACE = JournalSerdes.builder()
+        .register(LongEntrySerdes.LONG_ENTRY_SERDES, Long.class)
+        .build();
     private static final int DELETE_SEGMENT_SIZE = 64 * 1024;
 
     private final String persistenceId;
@@ -161,12 +293,18 @@ final class SegmentedJournalActor extends AbstractActor {
     private Meter messageWriteCount;
     // Tracks the size distribution of messages
     private Histogram messageSize;
+    // Tracks the number of messages completed for each flush
+    private Histogram flushMessages;
+    // Tracks the number of bytes completed for each flush
+    private Histogram flushBytes;
+    // Tracks the duration of flush operations
+    private Timer flushTime;
 
     private DataJournal dataJournal;
     private SegmentedJournal<Long> deleteJournal;
     private long lastDelete;
 
-    SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
+    private SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
             final int maxEntrySize, final int maxSegmentSize) {
         this.persistenceId = requireNonNull(persistenceId);
         this.directory = requireNonNull(directory);
@@ -176,20 +314,39 @@ final class SegmentedJournalActor extends AbstractActor {
     }
 
     static Props props(final String persistenceId, final File directory, final StorageLevel storage,
-            final int maxEntrySize, final int maxSegmentSize) {
-        return Props.create(SegmentedJournalActor.class, requireNonNull(persistenceId), directory, storage,
-            maxEntrySize, maxSegmentSize);
+            final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+        final var pid = requireNonNull(persistenceId);
+        return maxUnflushedBytes > 0
+            ? Props.create(Delayed.class, pid, directory, storage, maxEntrySize, maxSegmentSize, maxUnflushedBytes)
+            : Props.create(Immediate.class, pid, directory, storage, maxEntrySize, maxSegmentSize);
+    }
+
+    final String persistenceId() {
+        return persistenceId;
+    }
+
+    final void flushJournal(final long bytes, final int messages) {
+        final var sw = Stopwatch.createStarted();
+        dataJournal.flush();
+        LOG.debug("{}: journal flush completed in {}", persistenceId, sw.stop());
+        flushBytes.update(bytes);
+        flushMessages.update(messages);
+        flushTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
     }
 
     @Override
     public Receive createReceive() {
-        return receiveBuilder()
-                .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
-                .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
-                .match(ReplayMessages.class, this::handleReplayMessages)
-                .match(WriteMessages.class, this::handleWriteMessages)
-                .matchAny(this::handleUnknown)
-                .build();
+        return addMessages(receiveBuilder())
+            .matchAny(this::handleUnknown)
+            .build();
+    }
+
+    ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+        return builder
+            .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
+            .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
+            .match(ReplayMessages.class, this::handleReplayMessages)
+            .match(WriteMessages.class, this::handleWriteMessages);
     }
 
     @Override
@@ -197,12 +354,15 @@ final class SegmentedJournalActor extends AbstractActor {
         LOG.debug("{}: actor starting", persistenceId);
         super.preStart();
 
-        final MetricRegistry registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
-        final String actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();
+        final var registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
+        final var actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();
 
         batchWriteTime = registry.timer(MetricRegistry.name(actorName, "batchWriteTime"));
         messageWriteCount = registry.meter(MetricRegistry.name(actorName, "messageWriteCount"));
         messageSize = registry.histogram(MetricRegistry.name(actorName, "messageSize"));
+        flushBytes = registry.histogram(MetricRegistry.name(actorName, "flushBytes"));
+        flushMessages = registry.histogram(MetricRegistry.name(actorName, "flushMessages"));
+        flushTime = registry.timer(MetricRegistry.name(actorName, "flushTime"));
     }
 
     @Override
@@ -239,6 +399,8 @@ final class SegmentedJournalActor extends AbstractActor {
         ensureOpen();
 
         LOG.debug("{}: delete messages {}", persistenceId, message);
+        flushWrites();
+
         final long to = Long.min(dataJournal.lastWrittenSequenceNr(), message.toSequenceNr);
         LOG.debug("{}: adjusted delete to {}", persistenceId, to);
 
@@ -246,8 +408,8 @@ final class SegmentedJournalActor extends AbstractActor {
             LOG.debug("{}: deleting entries up to {}", persistenceId, to);
 
             lastDelete = to;
-            final SegmentedJournalWriter<Long> deleteWriter = deleteJournal.writer();
-            final Indexed<Long> entry = deleteWriter.append(lastDelete);
+            final var deleteWriter = deleteJournal.writer();
+            final var entry = deleteWriter.append(lastDelete);
             deleteWriter.commit(entry.index());
             dataJournal.deleteTo(lastDelete);
 
@@ -267,6 +429,7 @@ final class SegmentedJournalActor extends AbstractActor {
         final Long sequence;
         if (directory.isDirectory()) {
             ensureOpen();
+            flushWrites();
             sequence = dataJournal.lastWrittenSequenceNr();
         } else {
             sequence = 0L;
@@ -279,6 +442,7 @@ final class SegmentedJournalActor extends AbstractActor {
     private void handleReplayMessages(final ReplayMessages message) {
         LOG.debug("{}: replaying messages {}", persistenceId, message);
         ensureOpen();
+        flushWrites();
 
         final long from = Long.max(lastDelete + 1, message.fromSequenceNr);
         LOG.debug("{}: adjusted fromSequenceNr to {}", persistenceId, from);
@@ -289,15 +453,26 @@ final class SegmentedJournalActor extends AbstractActor {
     private void handleWriteMessages(final WriteMessages message) {
         ensureOpen();
 
-        final long startTicks = System.nanoTime();
+        final var sw = Stopwatch.createStarted();
         final long start = dataJournal.lastWrittenSequenceNr();
+        final var writtenMessages = dataJournal.handleWriteMessages(message);
+        sw.stop();
 
-        dataJournal.handleWriteMessages(message);
-
-        batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS);
+        batchWriteTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
         messageWriteCount.mark(dataJournal.lastWrittenSequenceNr() - start);
+
+        // log message after statistics are updated
+        LOG.debug("{}: write of {} bytes completed in {}", persistenceId, writtenMessages.writtenBytes, sw);
+        onWrittenMessages(writtenMessages);
     }
 
+    /**
+     * Handle a batch of messages which has been written to the data journal.
+     *
+     * @param message Messages which were written
+     */
+    abstract void onWrittenMessages(WrittenMessages message);
+
     private void handleUnknown(final Object message) {
         LOG.error("{}: Received unknown message {}", persistenceId, message);
     }
@@ -308,15 +483,19 @@ final class SegmentedJournalActor extends AbstractActor {
             return;
         }
 
+        final var sw = Stopwatch.createStarted();
         deleteJournal = SegmentedJournal.<Long>builder().withDirectory(directory).withName("delete")
                 .withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build();
-        final Indexed<Long> lastEntry = deleteJournal.writer().getLastEntry();
+        final var lastEntry = deleteJournal.writer().getLastEntry();
         lastDelete = lastEntry == null ? 0 : lastEntry.entry();
 
         dataJournal = new DataJournalV0(persistenceId, messageSize, context().system(), storage, directory,
             maxEntrySize, maxSegmentSize);
         dataJournal.deleteTo(lastDelete);
-        LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId,
+        LOG.debug("{}: journal open in {} with last index {}, deleted to {}", persistenceId, sw,
             dataJournal.lastWrittenSequenceNr(), lastDelete);
     }
+
+    abstract void flushWrites();
+
 }
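
Editor's note: the Delayed subclass above batches flushes behind a byte watermark plus a self-sent probe message, as described in its javadoc. Below is a standalone sketch of just that bookkeeping in plain Java, with no Akka types and a hypothetical flush callback; it models the accounting only and is not the actual actor implementation.

import java.util.ArrayDeque;

// Illustrative model of the Delayed flush bookkeeping: writes accumulate until either
// the unflushed-byte watermark is crossed or the probe for the current batch arrives,
// at which point everything queued so far is flushed at once.
final class DelayedFlushSketch {
    private final ArrayDeque<Long> unflushedWrites = new ArrayDeque<>();
    private final long maxUnflushedBytes;

    private long batch = 0;
    private long unflushedBytes = 0;

    DelayedFlushSketch(final long maxUnflushedBytes) {
        this.maxUnflushedBytes = maxUnflushedBytes;
    }

    // Called after each write. Returns the batch number to echo back as a probe
    // (the actor sends it to self), or -1 when no new probe is needed.
    long onWritten(final long writtenBytes) {
        final boolean first = unflushedWrites.isEmpty();
        unflushedWrites.addLast(writtenBytes);
        unflushedBytes += writtenBytes;
        if (unflushedBytes >= maxUnflushedBytes) {
            flushWrites();
            return -1;
        }
        return first ? ++batch : -1;
    }

    // Called when the probe comes back: flush only if no newer batch has superseded it.
    void onProbe(final long probedBatch) {
        if (probedBatch == batch) {
            flushWrites();
        }
    }

    private void flushWrites() {
        if (!unflushedWrites.isEmpty()) {
            // The real actor calls dataJournal.flush() here and completes the queued writes.
            unflushedWrites.clear();
            unflushedBytes = 0;
        }
    }
}
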
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java
new file mode 100644 (file)
index 0000000..636a5e1
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import akka.persistence.AtomicWrite;
+import akka.persistence.PersistentRepr;
+import akka.testkit.CallingThreadDispatcher;
+import akka.testkit.javadsl.TestKit;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.UniformReservoir;
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Ticker;
+import io.atomix.storage.journal.StorageLevel;
+import java.io.File;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.io.FileUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+class PerformanceTest {
+    private static final class Payload implements Serializable {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        final byte[] bytes;
+
+        Payload(final int size, final ThreadLocalRandom random) {
+            bytes = new byte[size];
+            random.nextBytes(bytes);
+        }
+    }
+
+    private static final class Request {
+        final WriteMessages write = new WriteMessages();
+        final Future<Optional<Exception>> future;
+
+        Request(final AtomicWrite atomicWrite) {
+            future = write.add(atomicWrite);
+        }
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(PerformanceTest.class);
+    private static final File DIRECTORY = new File("target/sfj-perf");
+
+    private static ActorSystem SYSTEM;
+
+    private TestKit kit;
+    private ActorRef actor;
+
+    @BeforeAll
+    static void beforeClass() {
+        SYSTEM = ActorSystem.create("test");
+    }
+
+    @AfterAll
+    static void afterClass() {
+        TestKit.shutdownActorSystem(SYSTEM);
+        SYSTEM = null;
+    }
+
+    @BeforeEach
+    void before() {
+        kit = new TestKit(SYSTEM);
+        FileUtils.deleteQuietly(DIRECTORY);
+    }
+
+    @AfterEach
+    void after() {
+        if (actor != null) {
+            actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        }
+        FileUtils.deleteQuietly(DIRECTORY);
+    }
+
+    @Disabled("Disable due to being an extensive time hog")
+    @ParameterizedTest
+    @MethodSource
+    void writeRequests(final StorageLevel storage, final int maxEntrySize, final int maxSegmentSize,
+            final int payloadSize, final int requestCount) {
+        LOG.info("Test {} entrySize={} segmentSize={} payload={} count={}", storage, maxEntrySize, maxSegmentSize,
+            payloadSize, requestCount);
+
+        actor = kit.childActorOf(
+            SegmentedJournalActor.props("perf", DIRECTORY, storage, maxEntrySize, maxSegmentSize, maxEntrySize)
+            .withDispatcher(CallingThreadDispatcher.Id()));
+
+        final var random = ThreadLocalRandom.current();
+        final var sw = Stopwatch.createStarted();
+        final var payloads = new Payload[1_000];
+        for (int i = 0; i < payloads.length; ++i) {
+            payloads[i] = new Payload(payloadSize, random);
+        }
+        LOG.info("{} payloads created in {}", payloads.length, sw.stop());
+
+        sw.reset().start();
+        final var requests = new Request[requestCount];
+        for (int i = 0; i < requests.length; ++i) {
+            requests[i] = new Request(AtomicWrite.apply(PersistentRepr.apply(payloads[random.nextInt(payloads.length)],
+                i, "foo", null, false, kit.getRef(), "uuid")));
+        }
+        LOG.info("{} requests created in {}", requests.length, sw.stop());
+
+        final var histogram = new Histogram(new UniformReservoir(requests.length));
+        sw.reset().start();
+        long started = System.nanoTime();
+        for (var req : requests) {
+            actor.tell(req.write, ActorRef.noSender());
+            assertTrue(req.future.isCompleted());
+            assertTrue(req.future.value().get().get().isEmpty());
+
+            final long now = System.nanoTime();
+            histogram.update(now - started);
+            started = now;
+        }
+        sw.stop();
+        final var snap = histogram.getSnapshot();
+
+        LOG.info("{} requests completed in {}", requests.length, sw);
+        LOG.info("Minimum: {}", formatNanos(snap.getMin()));
+        LOG.info("Maximum: {}", formatNanos(snap.getMax()));
+        LOG.info("Mean:    {}", formatNanos(snap.getMean()));
+        LOG.info("StdDev:  {}", formatNanos(snap.getStdDev()));
+        LOG.info("Median:  {}", formatNanos(snap.getMedian()));
+        LOG.info("75th:    {}", formatNanos(snap.get75thPercentile()));
+        LOG.info("95th:    {}", formatNanos(snap.get95thPercentile()));
+        LOG.info("98th:    {}", formatNanos(snap.get98thPercentile()));
+        LOG.info("99th:    {}", formatNanos(snap.get99thPercentile()));
+        LOG.info("99.9th:  {}", formatNanos(snap.get999thPercentile()));
+    }
+
+    private static List<Arguments> writeRequests() {
+        return List.of(
+            // DISK:
+            // 100K requests, 10K each, 16M max, 128M segment
+            Arguments.of(StorageLevel.DISK, 16 * 1024 * 1024, 128 * 1024 * 1024,    10_000,  100_000),
+            // 100K requests, 10K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024,    10_000,  100_000),
+            // 10K requests, 100K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024,   100_000,   10_000),
+            // 1K requests, 1M each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024, 1_000_000,    1_000),
+
+            // MAPPED:
+            // 100K requests, 10K each, 16M max, 128M segment
+            Arguments.of(StorageLevel.MAPPED, 16 * 1024 * 1024, 128 * 1024 * 1024,    10_000,  100_000),
+            // 100K requests, 10K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024,    10_000,  100_000),
+            // 10K requests, 100K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024,   100_000,   10_000),
+            // 1K requests, 1M each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024, 1_000_000,    1_000));
+
+    }
+
+    private static String formatNanos(final double nanos) {
+        return formatNanos(Math.round(nanos));
+    }
+
+    private static String formatNanos(final long nanos) {
+        return Stopwatch.createStarted(new Ticker() {
+            boolean started;
+
+            @Override
+            public long read() {
+                if (started) {
+                    return nanos;
+                }
+                started = true;
+                return 0;
+            }
+        }).toString();
+    }
+}
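Note on the helper at the end of PerformanceTest: formatNanos() abuses a Guava Stopwatch with a custom Ticker purely to reuse Stopwatch's human-readable duration formatting. A standalone sketch of the same trick (the class name NanosFormatter and the main() demo are illustrative, not part of the patch):

import com.google.common.base.Stopwatch;
import com.google.common.base.Ticker;

public final class NanosFormatter {
    private NanosFormatter() {
        // static helpers only
    }

    /** Renders a raw nanosecond count the way Stopwatch.toString() would, e.g. "1.500 ms". */
    static String formatNanos(final long nanos) {
        return Stopwatch.createStarted(new Ticker() {
            private boolean started;

            @Override
            public long read() {
                // the first read() happens inside createStarted() and must return 0,
                // every later read() reports the target duration
                if (started) {
                    return nanos;
                }
                started = true;
                return 0;
            }
        }).toString();
    }

    public static void main(final String[] args) {
        System.out.println(formatNanos(1_500_000));
    }
}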
index 87c7f99700a280d9461e0633c6b714b7333380a2..d488dc6cf2e3a354834cfdb604073bd1ea443f24 100644 (file)
@@ -29,4 +29,10 @@ public class SegmentedFileJournalSpecTest extends JavaJournalSpec {
         FileUtils.deleteQuietly(JOURNAL_DIR);
         super.beforeAll();
     }
+
+    @Override
+    public void afterAll() {
+        super.afterAll();
+        FileUtils.deleteQuietly(JOURNAL_DIR);
+    }
 }
index 7db0d4b87e0da205f4a227385d01ec46e4cae6a6..4d3db7980e2116ef013f62b13ed120ddb43c33f9 100644 (file)
@@ -7,13 +7,12 @@
  */
 package org.opendaylight.controller.akka.segjournal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -24,7 +23,7 @@ import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import akka.testkit.CallingThreadDispatcher;
 import akka.testkit.javadsl.TestKit;
-import io.atomix.storage.StorageLevel;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
 import java.io.IOException;
 import java.io.Serializable;
@@ -36,50 +35,59 @@ import java.util.Optional;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.AsyncMessage;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
 import scala.concurrent.Future;
 
-public class SegmentedFileJournalTest {
+@ExtendWith(MockitoExtension.class)
+class SegmentedFileJournalTest {
     private static final File DIRECTORY = new File("target/sfj-test");
     private static final int SEGMENT_SIZE = 1024 * 1024;
     private static final int MESSAGE_SIZE = 512 * 1024;
+    private static final int FLUSH_SIZE = 16 * 1024;
 
     private static ActorSystem SYSTEM;
 
+    @Mock
+    private Consumer<PersistentRepr> firstCallback;
+
     private TestKit kit;
     private ActorRef actor;
 
-    @BeforeClass
-    public static void beforeClass() {
+    @BeforeAll
+    static void beforeClass() {
         SYSTEM = ActorSystem.create("test");
     }
 
-    @AfterClass
-    public static void afterClass() {
+    @AfterAll
+    static void afterClass() {
         TestKit.shutdownActorSystem(SYSTEM);
         SYSTEM = null;
     }
 
-    @Before
-    public void before() {
+    @BeforeEach
+    void before() {
         kit = new TestKit(SYSTEM);
         FileUtils.deleteQuietly(DIRECTORY);
         actor = actor();
     }
 
-    @After
-    public void after() {
+    @AfterEach
+    void after() {
         actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        FileUtils.deleteQuietly(DIRECTORY);
     }
 
     @Test
-    public void testDeleteAfterStop() {
+    void testDeleteAfterStop() {
         // Preliminary setup
         final WriteMessages write = new WriteMessages();
         final Future<Optional<Exception>> first = write.add(AtomicWrite.apply(PersistentRepr.apply("first", 1, "foo",
@@ -108,7 +116,7 @@ public class SegmentedFileJournalTest {
     }
 
     @Test
-    public void testSegmentation() throws IOException {
+    void testSegmentation() throws IOException {
         // We want to have roughly three segments
         final LargePayload payload = new LargePayload();
 
@@ -133,7 +141,7 @@ public class SegmentedFileJournalTest {
     }
 
     @Test
-    public void testComplexDeletesAndPartialReplays() throws Exception {
+    void testComplexDeletesAndPartialReplays() throws Exception {
         for (int i = 0; i <= 4; i++) {
             writeBigPaylod();
         }
@@ -203,7 +211,7 @@ public class SegmentedFileJournalTest {
 
     private ActorRef actor() {
         return kit.childActorOf(SegmentedJournalActor.props("foo", DIRECTORY, StorageLevel.DISK, MESSAGE_SIZE,
-            SEGMENT_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
+            SEGMENT_SIZE, FLUSH_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
     }
 
     private void deleteEntries(final long deleteTo) {
@@ -219,8 +227,8 @@ public class SegmentedFileJournalTest {
     }
 
     private void assertReplayCount(final int expected) {
-        Consumer<PersistentRepr> firstCallback = mock(Consumer.class);
-        doNothing().when(firstCallback).accept(any(PersistentRepr.class));
+        // Cast fixes an Eclipse warning 'generic array created'
+        reset((Object) firstCallback);
         AsyncMessage<Void> replay = SegmentedJournalActor.replayMessages(0, Long.MAX_VALUE, Long.MAX_VALUE,
             firstCallback);
         actor.tell(replay, ActorRef.noSender());
@@ -243,10 +251,10 @@ public class SegmentedFileJournalTest {
         return future.value().get().get();
     }
 
-    private static final class LargePayload implements Serializable {
+    static final class LargePayload implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final byte[] bytes = new byte[MESSAGE_SIZE / 2];
-
     }
 }
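Note on assertReplayCount() above: resetting an injected @Mock of a generic type goes through reset((Object) mock) because Mockito's reset() is varargs and a direct call would otherwise create a Consumer<PersistentRepr>[] behind the scenes. A self-contained sketch of the same pattern (the test class and the Consumer<String> mock are illustrative):

import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;

import java.util.function.Consumer;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ResetGenericMockTest {
    @Mock
    private Consumer<String> callback;

    @Test
    void resetClearsRecordedInvocations() {
        callback.accept("first");
        verify(callback).accept("first");

        // reset() is varargs; the Object cast keeps the compiler from creating a generic array
        reset((Object) callback);

        callback.accept("second");
        // only the post-reset invocation is recorded
        verify(callback).accept("second");
    }
}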
index 3240e8849bd8a1e9dc798bc6e30c5ce55c80f021..dd32609d307c32838291646b34e4efdcb5dbb6d7 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-it-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../mdsal-it-parent</relativePath>
   </parent>
   <artifactId>sal-binding-it</artifactId>
index 60b4865ee2aa7e7a043a14114fdc961d296f2fec..0996dae3b938a44d76314d3f7392128c28ffba6d 100644 (file)
@@ -33,7 +33,6 @@ public abstract class AbstractIT extends AbstractMdsalTestBase {
     protected Option[] getAdditionalOptions() {
         return new Option[] {
                 mavenBundle("org.opendaylight.controller", "sal-test-model").versionAsInProject(),
-                mavenBundle("net.bytebuddy", "byte-buddy").versionAsInProject(),
         };
     }
 }
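For context, getAdditionalOptions() is the pax-exam hook AbstractIT uses to provision extra test bundles; the change above simply drops the no-longer-needed byte-buddy bundle. A hedged sketch of the hook (ExampleIT is an illustrative class name; the pax-exam calls are the ones already used above):

import static org.ops4j.pax.exam.CoreOptions.mavenBundle;

import org.ops4j.pax.exam.Option;

public abstract class ExampleIT {
    // Provision only the bundles the test really needs; versionAsInProject()
    // resolves each version from metadata generated at build time.
    protected Option[] getAdditionalOptions() {
        return new Option[] {
            mavenBundle("org.opendaylight.controller", "sal-test-model").versionAsInProject(),
        };
    }
}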
index c1d94e65bebf2d11736d4b7b94f1fc706665b631..163163bf2878ced066a12c16672c47d83324e96c 100644 (file)
@@ -10,16 +10,12 @@ package org.opendaylight.controller.test.sal.binding.it;
 import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
-import java.util.List;
 import javax.inject.Inject;
 import org.junit.Test;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OpendaylightTestNotificationListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotificationBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
 import org.opendaylight.yangtools.yang.common.Uint16;
 import org.ops4j.pax.exam.util.Filter;
 import org.slf4j.Logger;
@@ -44,71 +40,53 @@ public class NotificationIT extends AbstractIT {
      */
     @Test
     public void notificationTest() throws Exception {
-        NotificationTestListener listener1 = new NotificationTestListener();
-        ListenerRegistration<NotificationListener> listener1Reg =
-                notificationService.registerNotificationListener(listener1);
-
-        LOG.info("The notification of type FlowAdded with cookie ID 0 is created. The "
-                + "delay 100ms to make sure that the notification was delivered to "
-                + "listener.");
-        notificationPublishService.putNotification(noDustNotification("rainy day", 42));
-        Thread.sleep(100);
-
-        /**
-         * Check that one notification was delivered and has correct cookie.
-         */
-        assertEquals(1, listener1.notificationBag.size());
-        assertEquals("rainy day", listener1.notificationBag.get(0).getReason());
-        assertEquals(42, listener1.notificationBag.get(0).getDaysTillNewDust().intValue());
-
-        LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
+        final var bag1 = new ArrayList<OutOfPixieDustNotification>();
+        try (var reg1 = notificationService.registerListener(OutOfPixieDustNotification.class, bag1::add)) {
+            LOG.info("""
+                An OutOfPixieDustNotification for 'rainy day' is published. A 100ms\s\
+                delay follows to make sure that the notification was delivered to\s\
+                the listener.""");
+            notificationPublishService.putNotification(noDustNotification("rainy day", 42));
+            Thread.sleep(100);
+
+            // Check that one notification was delivered and carries the expected reason and day count.
+            assertEquals(1, bag1.size());
+            assertEquals("rainy day", bag1.get(0).getReason());
+            assertEquals(42, bag1.get(0).getDaysTillNewDust().intValue());
+
+            LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
                 + "registered as notification listener.");
 
-        NotificationTestListener listener2 = new NotificationTestListener();
-        final ListenerRegistration<NotificationListener> listener2Reg =
-                notificationService.registerNotificationListener(listener2);
-
-        LOG.info("3 notifications are published");
-        notificationPublishService.putNotification(noDustNotification("rainy day", 5));
-        notificationPublishService.putNotification(noDustNotification("rainy day", 10));
-        notificationPublishService.putNotification(noDustNotification("tax collector", 2));
-
-        /**
-         * The delay 100ms to make sure that the notifications were delivered to
-         * listeners.
-         */
-        Thread.sleep(100);
-
-        /**
-         * Check that 3 notification was delivered to both listeners (first one
-         * received 4 in total, second 3 in total).
-         */
-        assertEquals(4, listener1.notificationBag.size());
-        assertEquals(3, listener2.notificationBag.size());
-
-        /**
-         * The second listener is closed (unregistered)
-         *
-         */
-        listener2Reg.close();
-
-        LOG.info("The notification 5 is published");
-        notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10));
-
-        /**
-         * The delay 100ms to make sure that the notification was delivered to
-         * listener.
-         */
-        Thread.sleep(100);
-
-        /**
-         * Check that first consumer received 5 notifications in total, second
-         * consumer received only three. Last notification was never received by
-         * second consumer because its listener was unregistered.
-         *
-         */
-        assertEquals(5, listener1.notificationBag.size());
-        assertEquals(3, listener2.notificationBag.size());
+            final var bag2 = new ArrayList<OutOfPixieDustNotification>();
+            try (var reg2 = notificationService.registerListener(OutOfPixieDustNotification.class, bag2::add)) {
+                LOG.info("3 notifications are published");
+                notificationPublishService.putNotification(noDustNotification("rainy day", 5));
+                notificationPublishService.putNotification(noDustNotification("rainy day", 10));
+                notificationPublishService.putNotification(noDustNotification("tax collector", 2));
+
+                // A 100ms delay to make sure that the notifications were delivered to the listeners.
+                Thread.sleep(100);
+
+                // Check that 3 notifications were delivered to both listeners (the first one received 4 in total,
+                // the second 3 in total).
+                assertEquals(4, bag1.size());
+                assertEquals(3, bag2.size());
+
+                // The second listener is closed (unregistered)
+                reg2.close();
+
+                LOG.info("The notification 5 is published");
+                notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10));
+
+                // A 100ms delay to make sure that the notification was delivered to the listener.
+                Thread.sleep(100);
+
+                // Check that the first consumer received 5 notifications in total and the second only three: the
+                // last notification was never received by the second consumer because its listener was unregistered.
+                assertEquals(5, bag1.size());
+                assertEquals(3, bag2.size());
+            }
+        }
     }
 
     /**
@@ -121,17 +99,4 @@ public class NotificationIT extends AbstractIT {
         ret.setReason(reason).setDaysTillNewDust(Uint16.valueOf(days));
         return ret.build();
     }
-
-    /**
-     * Implements {@link OpendaylightTestNotificationListener} and contains attributes which keep lists of objects of
-     * the type {@link OutOfPixieDustNotification}.
-     */
-    public static class NotificationTestListener implements OpendaylightTestNotificationListener {
-        List<OutOfPixieDustNotification> notificationBag = new ArrayList<>();
-
-        @Override
-        public void onOutOfPixieDustNotification(final OutOfPixieDustNotification arg0) {
-            notificationBag.add(arg0);
-        }
-    }
 }
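The rewrite above replaces the generated whole-module listener with the per-type NotificationService.registerListener(Class, listener) call, whose Registration is closed via try-with-resources. A minimal sketch built only from the calls visible in the diff (the PixieDustCollector class and its collect() helper are hypothetical):

import java.util.ArrayList;
import java.util.List;
import org.opendaylight.mdsal.binding.api.NotificationService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;

final class PixieDustCollector {
    private PixieDustCollector() {
        // static helper only
    }

    static List<OutOfPixieDustNotification> collect(final NotificationService notificationService,
            final Runnable publisher) throws InterruptedException {
        final var bag = new ArrayList<OutOfPixieDustNotification>();
        // registerListener() returns a Registration; closing it unregisters the listener
        try (var reg = notificationService.registerListener(OutOfPixieDustNotification.class, bag::add)) {
            publisher.run();
            // mirror the test's 100ms grace period for asynchronous delivery
            Thread.sleep(100);
        }
        return bag;
    }
}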
index 523071415df6474e9e3be50b58460f39bbc1e5c7..5cdde319cd49d736e812dde3be3cebaa2624b518 100644 (file)
@@ -9,7 +9,8 @@ package org.opendaylight.controller.test.sal.binding.it;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -19,10 +20,9 @@ import java.util.Set;
 import javax.inject.Inject;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteOutput;
@@ -30,7 +30,7 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.ops4j.pax.exam.util.Filter;
@@ -41,12 +41,10 @@ import org.slf4j.LoggerFactory;
  * Covers routed rpc creation, registration, invocation, unregistration.
  */
 public class RoutedServiceIT extends AbstractIT {
+    private static final Logger LOG = LoggerFactory.getLogger(RoutedServiceIT.class);
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(RoutedServiceIT.class);
-
-    protected OpendaylightTestRoutedRpcService odlRoutedService1;
-    protected OpendaylightTestRoutedRpcService odlRoutedService2;
+    protected RoutedSimpleRoute routedSimpleRouteRpc1;
+    protected RoutedSimpleRoute routedSimpleRouteRpc2;
 
     @Inject
     @Filter(timeout = 120 * 1000)
@@ -54,95 +52,92 @@ public class RoutedServiceIT extends AbstractIT {
 
     @Inject
     @Filter(timeout = 120 * 1000)
-    RpcConsumerRegistry rpcConsumerRegistry;
+    RpcService rpcService;
 
     /**
      * Prepare mocks.
      */
     @Before
     public void setUp() {
-        odlRoutedService1 = mock(OpendaylightTestRoutedRpcService.class, "First Flow Service");
-        odlRoutedService2 = mock(OpendaylightTestRoutedRpcService.class, "Second Flow Service");
-        Mockito.when(odlRoutedService1.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
-            .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
-        Mockito.when(odlRoutedService2.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
-            .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
+        routedSimpleRouteRpc1 = mock(RoutedSimpleRoute.class, "First Flow Rpc");
+        doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc1).implementedInterface();
+        doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc1)
+            .invoke(any());
+
+        routedSimpleRouteRpc2 = mock(RoutedSimpleRoute.class, "Second Flow Rpc");
+        doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc2).implementedInterface();
+        doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc2)
+            .invoke(any());
     }
 
     @Test
     public void testServiceRegistration() {
-        LOG.info("Register provider 1 with first implementation of routeSimpleService - service1 of node 1");
+        LOG.info("Register provider 1 with first implementation of routeSimpleService - rpc1 of node 1");
         final InstanceIdentifier<UnorderedList> nodeOnePath = createNodeRef("foo:node:1");
         final InstanceIdentifier<UnorderedList> nodeTwo = createNodeRef("foo:node:2");
 
-        ObjectRegistration<OpendaylightTestRoutedRpcService> firstReg = rpcProviderService.registerRpcImplementation(
-            OpendaylightTestRoutedRpcService.class, odlRoutedService1,  Set.of(nodeOnePath));
+        Registration firstReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc1,
+            Set.of(nodeOnePath));
         assertNotNull("Registration should not be null", firstReg);
-        assertSame(odlRoutedService1, firstReg.getInstance());
 
-        LOG.info("Register provider 2 with second implementation of routeSimpleService - service2 of node 2");
+        LOG.info("Register provider 2 with second implementation of routeSimpleService - rpc2 of node 2");
 
-        ObjectRegistration<OpendaylightTestRoutedRpcService> secondReg = rpcProviderService.registerRpcImplementation(
-            OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeTwo));
+        Registration secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeTwo));
         assertNotNull("Registration should not be null", firstReg);
-        assertSame(odlRoutedService2, secondReg.getInstance());
         assertNotSame(secondReg, firstReg);
 
-        OpendaylightTestRoutedRpcService consumerService =
-                rpcConsumerRegistry.getRpcService(OpendaylightTestRoutedRpcService.class);
+        RoutedSimpleRoute consumerService = rpcService.getRpc(RoutedSimpleRoute.class);
         assertNotNull("MD-SAL instance of test Service should be returned", consumerService);
-        assertNotSame("Provider instance and consumer instance should not be same.", odlRoutedService1,
+        assertNotSame("Provider instance and consumer instance should not be same.", routedSimpleRouteRpc1,
                 consumerService);
 
         /**
          * Consumer creates addFlow message for node one and sends it to the MD-SAL.
          */
         final RoutedSimpleRouteInput simpleRouteFirstFoo = createSimpleRouteInput(nodeOnePath);
-        consumerService.routedSimpleRoute(simpleRouteFirstFoo);
+        consumerService.invoke(simpleRouteFirstFoo);
 
         /**
-         * Verifies that implementation of the first provider received the same message from MD-SAL.
+         * Verifies that implementation of the first instance received the same message from MD-SAL.
          */
-        verify(odlRoutedService1).routedSimpleRoute(simpleRouteFirstFoo);
+        verify(routedSimpleRouteRpc1).invoke(simpleRouteFirstFoo);
         /**
          * Verifies that second instance was not invoked with first message
          */
-        verify(odlRoutedService2, times(0)).routedSimpleRoute(simpleRouteFirstFoo);
+        verify(routedSimpleRouteRpc2, times(0)).invoke(simpleRouteFirstFoo);
 
         /**
          * Consumer sends message to nodeTwo for three times. Should be processed by second instance.
          */
         final RoutedSimpleRouteInput simpleRouteSecondFoo = createSimpleRouteInput(nodeTwo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
 
         /**
          * Verifies that second instance was invoked 3 times with second message and first instance wasn't invoked.
          */
-        verify(odlRoutedService2, times(3)).routedSimpleRoute(simpleRouteSecondFoo);
-        verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteSecondFoo);
+        verify(routedSimpleRouteRpc2, times(3)).invoke(simpleRouteSecondFoo);
+        verify(routedSimpleRouteRpc1, times(0)).invoke(simpleRouteSecondFoo);
 
         LOG.info("Unregistration of the path for the node one in the first provider");
         firstReg.close();
 
         LOG.info("Provider 2 registers path of node 1");
         secondReg.close();
-        secondReg = rpcProviderService.registerRpcImplementation(
-            OpendaylightTestRoutedRpcService.class, odlRoutedService2, Set.of(nodeOnePath));
+        secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeOnePath));
 
         /**
          * A consumer sends third message to node 1.
          */
         final RoutedSimpleRouteInput simpleRouteThirdFoo = createSimpleRouteInput(nodeOnePath);
-        consumerService.routedSimpleRoute(simpleRouteThirdFoo);
+        consumerService.invoke(simpleRouteThirdFoo);
 
         /**
          * Verifies that provider 1 wasn't invoked and provider 2 was invoked 1 time.
          * TODO: fix unregister path
          */
-        //verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteThirdFoo);
-        verify(odlRoutedService2).routedSimpleRoute(simpleRouteThirdFoo);
+        verify(routedSimpleRouteRpc2).invoke(simpleRouteThirdFoo);
     }
 
     /**
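The test above now registers a plain Rpc implementation against a set of instance paths and resolves the consumer side through RpcService.getRpc(). A hedged sketch using only the calls shown in the diff (RoutedRpcWiring and wire() are illustrative names):

import java.util.Set;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.RpcService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class RoutedRpcWiring {
    private RoutedRpcWiring() {
        // static helper only
    }

    static Registration wire(final RpcProviderService rpcProviderService, final RpcService rpcService,
            final RoutedSimpleRoute implementation, final InstanceIdentifier<UnorderedList> path,
            final RoutedSimpleRouteInput input) {
        // bind the implementation to the given instance path ...
        final Registration reg = rpcProviderService.registerRpcImplementation(implementation, Set.of(path));
        // ... and invoke it through the consumer-side facade; routing picks the
        // implementation registered for the path referenced by the input
        rpcService.getRpc(RoutedSimpleRoute.class).invoke(input);
        return reg;
    }
}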
index ce3e83d007c9de8fe1f72d5e7769150659457dbb..ad8d996edce0f3ad1e0c2c9b16a7fdfcbc5ae19a 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>cds-access-api</artifactId>
index 713d07a85ec1e7721bc0672136d31359c0bd3b19..cb905343c8ebefd5852da7c7e3cbd4a0c388b8b8 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
 
   <dependencies>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-cluster-admin-api</artifactId>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
-
-    <!-- Tests -->
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-akka-raft</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-distributed-datastore</artifactId>
-      <type>test-jar</type>
-      <version>${project.version}</version>
-      <scope>test</scope>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
+      <artifactId>concepts</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller.samples</groupId>
-      <artifactId>clustering-it-model</artifactId>
-      <scope>test</scope>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
     </dependency>
     <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-api</artifactId>
     </dependency>
-
-    <!-- Akka -->
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.13</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-spi</artifactId>
     </dependency>
-
-    <!-- Scala -->
     <dependency>
-      <groupId>org.scala-lang</groupId>
-      <artifactId>scala-library</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
     </dependency>
-
-    <!-- OpenDaylight -->
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>eos-dom-akka</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-api</artifactId>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>cds-access-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>repackaged-akka</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-cluster-admin-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-distributed-datastore</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+    </dependency>
+
+    <!-- Tests -->
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_2.13</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-akka-raft</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-distributed-datastore</artifactId>
+      <type>test-jar</type>
+      <version>${project.version}</version>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
+      <artifactId>yang-data-api</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-test-util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-model</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <scope>test</scope>
     </dependency>
-
   </dependencies>
 
   <build>
index e00d620d7c0e84492f045dbfe2492d1e6a6e8267..8ad1553dba2268daa9c7a86fcad319997d8fa22e 100644 (file)
@@ -13,6 +13,7 @@ import akka.actor.Status.Success;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
@@ -55,43 +56,52 @@ import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
 import org.opendaylight.controller.eos.akka.DataCenterControl;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutputBuilder;
@@ -107,8 +117,9 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.Empty;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.Uint32;
@@ -121,39 +132,58 @@ import scala.concurrent.Future;
  *
  * @author Thomas Pantelis
  */
-public class ClusterAdminRpcService implements ClusterAdminService {
+public final class ClusterAdminRpcService {
     private static final Timeout SHARD_MGR_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
 
     private static final Logger LOG = LoggerFactory.getLogger(ClusterAdminRpcService.class);
     private static final @NonNull RpcResult<LocateShardOutput> LOCAL_SHARD_RESULT =
             RpcResultBuilder.success(new LocateShardOutputBuilder()
-                .setMemberNode(new LocalBuilder().setLocal(Empty.getInstance()).build())
+                .setMemberNode(new LocalBuilder().setLocal(Empty.value()).build())
                 .build())
             .build();
 
     private final DistributedDataStoreInterface configDataStore;
     private final DistributedDataStoreInterface operDataStore;
-    private final BindingNormalizedNodeSerializer serializer;
     private final Timeout makeLeaderLocalTimeout;
     private final DataCenterControl dataCenterControl;
 
     public ClusterAdminRpcService(final DistributedDataStoreInterface configDataStore,
                                   final DistributedDataStoreInterface operDataStore,
-                                  final BindingNormalizedNodeSerializer serializer,
                                   final DataCenterControl dataCenterControl) {
         this.configDataStore = configDataStore;
         this.operDataStore = operDataStore;
-        this.serializer = serializer;
 
-        this.makeLeaderLocalTimeout =
+        makeLeaderLocalTimeout =
                 new Timeout(configDataStore.getActorUtils().getDatastoreContext()
                         .getShardLeaderElectionTimeout().duration().$times(2));
 
         this.dataCenterControl = dataCenterControl;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+    Registration registerWith(final RpcProviderService rpcProviderService) {
+        return rpcProviderService.registerRpcImplementations(
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .AddShardReplica) this::addShardReplica,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .RemoveShardReplica) this::removeShardReplica,
+            (LocateShard) this::locateShard,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .MakeLeaderLocal) this::makeLeaderLocal,
+            (AddReplicasForAllShards) this::addReplicasForAllShards,
+            (RemoveAllShardReplicas) this::removeAllShardReplicas,
+            (ChangeMemberVotingStatesForShard) this::changeMemberVotingStatesForShard,
+            (ChangeMemberVotingStatesForAllShards) this::changeMemberVotingStatesForAllShards,
+            (FlipMemberVotingStatesForAllShards) this::flipMemberVotingStatesForAllShards,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .GetShardRole) this::getShardRole,
+            (BackupDatastore) this::backupDatastore,
+            (GetKnownClientsForAllShards) this::getKnownClientsForAllShards,
+            (ActivateEosDatacenter) this::activateEosDatacenter,
+            (DeactivateEosDatacenter) this::deactivateEosDatacenter);
+    }
+
+    @VisibleForTesting
+    ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -166,28 +196,27 @@ public class ClusterAdminRpcService implements ClusterAdminService {
 
         LOG.info("Adding replica for shard {}", shardName);
 
-        final SettableFuture<RpcResult<AddShardReplicaOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName));
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully added replica for shard {}", shardName);
-                returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
-            }
+        final var returnFuture = SettableFuture.<RpcResult<AddShardReplicaOutput>>create();
+        Futures.addCallback(sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName)),
+            new FutureCallback<Success>() {
+                @Override
+                public void onSuccess(final Success success) {
+                    LOG.info("Successfully added replica for shard {}", shardName);
+                    returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
+                }
 
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
+                @Override
+                public void onFailure(final Throwable failure) {
+                    onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
                         returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
+                }
+            }, MoreExecutors.directExecutor());
 
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
-            final RemoveShardReplicaInput input) {
+    @VisibleForTesting
+    ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(final RemoveShardReplicaInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -225,8 +254,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<LocateShardOutput>> locateShard(final LocateShardInput input) {
+    private ListenableFuture<RpcResult<LocateShardOutput>> locateShard(final LocateShardInput input) {
         final ActorUtils utils;
         switch (input.getDataStoreType()) {
             case Config:
@@ -267,8 +295,8 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return ret;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
+    @VisibleForTesting
+    ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -322,8 +350,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return future;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
+    @VisibleForTesting ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
             final AddReplicasForAllShardsInput input) {
         LOG.info("Adding replicas for all shards");
 
@@ -337,9 +364,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 "Failed to add replica");
     }
 
-
-    @Override
-    public ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
+    @VisibleForTesting ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
             final RemoveAllShardReplicasInput input) {
         LOG.info("Removing replicas for all shards");
 
@@ -355,56 +380,54 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, messageSupplier);
         sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, messageSupplier);
 
-        return waitForShardResults(shardResultData, shardResults ->
-                new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
-        "       Failed to remove replica");
+        return waitForShardResults(shardResultData,
+            shardResults -> new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
+            "       Failed to remove replica");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
             final ChangeMemberVotingStatesForShardInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
         }
 
-        DataStoreType dataStoreType = input.getDataStoreType();
+        final var dataStoreType = input.getDataStoreType();
         if (dataStoreType == null) {
             return newFailedRpcResultFuture("A valid DataStoreType must be specified");
         }
 
-        List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
+        final var memberVotingStates = input.getMemberVotingState();
         if (memberVotingStates == null || memberVotingStates.isEmpty()) {
             return newFailedRpcResultFuture("No member voting state input was specified");
         }
 
-        ChangeShardMembersVotingStatus changeVotingStatus = toChangeShardMembersVotingStatus(shardName,
-                memberVotingStates);
-
+        final var changeVotingStatus = toChangeShardMembersVotingStatus(shardName, memberVotingStates);
         LOG.info("Change member voting states for shard {}: {}", shardName,
                 changeVotingStatus.getMeberVotingStatusMap());
 
-        final SettableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, changeVotingStatus);
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully changed member voting states for shard {}", shardName);
-                returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
-            }
+        final var returnFuture = SettableFuture.<RpcResult<ChangeMemberVotingStatesForShardOutput>>create();
+        Futures.addCallback(sendMessageToShardManager(dataStoreType, changeVotingStatus),
+            new FutureCallback<Success>() {
+                @Override
+                public void onSuccess(final Success success) {
+                    LOG.info("Successfully changed member voting states for shard {}", shardName);
+                    returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
+                }
 
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
+                @Override
+                public void onFailure(final Throwable failure) {
+                    onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
                         returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
+                }
+            }, MoreExecutors.directExecutor());
 
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
             final ChangeMemberVotingStatesForAllShardsInput input) {
         List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
         if (memberVotingStates == null || memberVotingStates.isEmpty()) {
@@ -425,11 +448,11 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 "Failed to change member voting states");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
             final FlipMemberVotingStatesForAllShardsInput input) {
-        final List<Entry<ListenableFuture<Success>, ShardResultBuilder>> shardResultData = new ArrayList<>();
-        Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
+        final var shardResultData = new ArrayList<Entry<ListenableFuture<Success>, ShardResultBuilder>>();
+        final Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
 
         LOG.info("Flip member voting states for all shards");
 
@@ -441,8 +464,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 "Failed to change member voting states");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
+    private ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -484,8 +506,8 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
+    @VisibleForTesting
+    ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
         LOG.debug("backupDatastore: {}", input);
 
         if (Strings.isNullOrEmpty(input.getFilePath())) {
@@ -513,9 +535,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
-
-    @Override
-    public ListenableFuture<RpcResult<GetKnownClientsForAllShardsOutput>> getKnownClientsForAllShards(
+    private ListenableFuture<RpcResult<GetKnownClientsForAllShardsOutput>> getKnownClientsForAllShards(
             final GetKnownClientsForAllShardsInput input) {
         final ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> allShardReplies =
                 getAllShardLeadersClients();
@@ -523,8 +543,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
             MoreExecutors.directExecutor());
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ActivateEosDatacenterOutput>> activateEosDatacenter(
+    private ListenableFuture<RpcResult<ActivateEosDatacenterOutput>> activateEosDatacenter(
             final ActivateEosDatacenterInput input) {
         LOG.debug("Activating EOS Datacenter");
         final SettableFuture<RpcResult<ActivateEosDatacenterOutput>> future = SettableFuture.create();
@@ -545,8 +564,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return future;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<DeactivateEosDatacenterOutput>> deactivateEosDatacenter(
+    private ListenableFuture<RpcResult<DeactivateEosDatacenterOutput>> deactivateEosDatacenter(
             final DeactivateEosDatacenterInput input) {
         LOG.debug("Deactivating EOS Datacenter");
         final SettableFuture<RpcResult<DeactivateEosDatacenterOutput>> future = SettableFuture.create();
@@ -696,8 +714,6 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return ask(shardManager, message, SHARD_MGR_TIMEOUT);
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     @SuppressWarnings("checkstyle:IllegalCatch")
     private static void saveSnapshotsToFile(final DatastoreSnapshotList snapshots, final String fileName,
             final SettableFuture<RpcResult<BackupDatastoreOutput>> returnFuture) {
index 82a669e166a3781d59528be9ed2150a657cab2db..bcbf408eedbfa04f2f882985a60712bbeb245ff4 100644 (file)
@@ -7,13 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.admin;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.opendaylight.controller.eos.akka.DataCenterControl;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.osgi.service.component.annotations.Activate;
 import org.osgi.service.component.annotations.Component;
 import org.osgi.service.component.annotations.Deactivate;
@@ -21,28 +18,21 @@ import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Beta
-@Component(immediate = true)
+@Component(service = { })
 public final class OSGiClusterAdmin {
     private static final Logger LOG = LoggerFactory.getLogger(OSGiClusterAdmin.class);
 
-    @Reference(target = "(type=distributed-config)")
-    DistributedDataStoreInterface configDatastore = null;
-    @Reference(target = "(type=distributed-operational)")
-    DistributedDataStoreInterface operDatastore = null;
-    @Reference
-    BindingNormalizedNodeSerializer serializer = null;
-    @Reference
-    RpcProviderService rpcProviderService = null;
-    @Reference
-    DataCenterControl dataCenterControl = null;
-
-    private ObjectRegistration<?> reg;
+    private final Registration reg;
 
     @Activate
-    void activate() {
-        reg = rpcProviderService.registerRpcImplementation(ClusterAdminService.class,
-            new ClusterAdminRpcService(configDatastore, operDatastore, serializer, dataCenterControl));
+    public OSGiClusterAdmin(
+            @Reference(target = "(type=distributed-config)") final DistributedDataStoreInterface configDatastore,
+            @Reference(target = "(type=distributed-operational)") final DistributedDataStoreInterface operDatastore,
+            @Reference final RpcProviderService rpcProviderService,
+            @Reference final DataCenterControl dataCenterControl) {
+        reg = new ClusterAdminRpcService(configDatastore, operDatastore, dataCenterControl)
+            .registerWith(rpcProviderService);
         LOG.info("Cluster Admin services started");
     }
 
index ba00174f24d3de22947ea8f9945c25f34906732f..2239908877b01d91b2e02b3894cabeb796e6f1a3 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.cluster.datastore.admin;
 
 import static java.lang.Boolean.FALSE;
 import static java.lang.Boolean.TRUE;
+import static java.util.Objects.requireNonNull;
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.MatcherAssert.assertThat;
@@ -26,18 +27,14 @@ import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Status.Success;
 import akka.cluster.Cluster;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import java.io.File;
-import java.io.FileInputStream;
-import java.util.AbstractMap.SimpleEntry;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -46,6 +43,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.MemberNode;
@@ -63,35 +61,23 @@ import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
-import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.XMLNamespace;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * Unit tests for ClusterAdminRpcService.
@@ -99,6 +85,12 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  * @author Thomas Pantelis
  */
 public class ClusterAdminRpcServiceTest {
+    record ExpState(String name, boolean voting) {
+        ExpState {
+            requireNonNull(name);
+        }
+    }
+
     private static final MemberName MEMBER_1 = MemberName.forName("member-1");
     private static final MemberName MEMBER_2 = MemberName.forName("member-2");
     private static final MemberName MEMBER_3 = MemberName.forName("member-3");
@@ -112,34 +104,38 @@ public class ClusterAdminRpcServiceTest {
 
     @After
     public void tearDown() {
-        for (MemberNode m : Lists.reverse(memberNodes)) {
-            m.cleanup();
+        for (var member : Lists.reverse(memberNodes)) {
+            member.cleanup();
         }
         memberNodes.clear();
     }
 
     @Test
     public void testBackupDatastore() throws Exception {
-        MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1")
-                .moduleShardsConfig("module-shards-member1.conf").waitForShardLeader("cars", "people")
-                .testName("testBackupDatastore").build();
+        final var node = MemberNode.builder(memberNodes)
+            .akkaConfig("Member1")
+            .moduleShardsConfig("module-shards-member1.conf")
+            .waitForShardLeader("cars", "people")
+            .testName("testBackupDatastore")
+            .build();
 
-        String fileName = "target/testBackupDatastore";
-        new File(fileName).delete();
+        final var fileName = "target/testBackupDatastore";
+        final var file = new File(fileName);
+        file.delete();
 
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(),
-                null, null);
+        final var service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
 
-        RpcResult<BackupDatastoreOutput> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
-                .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
+        var rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
+            .get(5, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        try (FileInputStream fis = new FileInputStream(fileName)) {
-            List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
+        try (var fis = Files.newInputStream(file.toPath())) {
+            final List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
             assertEquals("DatastoreSnapshot size", 2, snapshots.size());
 
-            ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
-                    snapshots.get(1).getType(), snapshots.get(1));
+            final var map = Map.of(
+                snapshots.get(0).getType(), snapshots.get(0),
+                snapshots.get(1).getType(), snapshots.get(1));
             verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(),
                     map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people");
         } finally {
@@ -151,7 +147,7 @@ public class ClusterAdminRpcServiceTest {
         node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder()
                 .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
 
-        ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get();
+        final var carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").orElseThrow();
         node.kit().watch(carsShardActor);
         carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
         node.kit().expectTerminated(carsShardActor);
@@ -165,9 +161,9 @@ public class ClusterAdminRpcServiceTest {
     private static void verifyDatastoreSnapshot(final String type, final DatastoreSnapshot datastoreSnapshot,
             final String... expShardNames) {
         assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
-        Set<String> shardNames = new HashSet<>();
-        for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
-            shardNames.add(s.getName());
+        var shardNames = new HashSet<String>();
+        for (var snapshot : datastoreSnapshot.getShardSnapshots()) {
+            shardNames.add(snapshot.getName());
         }
 
         assertEquals("DatastoreSnapshot shard names", Set.of(expShardNames), shardNames);
@@ -178,7 +174,7 @@ public class ClusterAdminRpcServiceTest {
         String name = "testGetPrefixShardRole";
         String moduleShardsConfig = "module-shards-default-member-1.conf";
 
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
@@ -189,11 +185,11 @@ public class ClusterAdminRpcServiceTest {
         String name = "testModuleShardLeaderMovement";
         String moduleShardsConfig = "module-shards-member1.conf";
 
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         member1.waitForMembersUp("member-2", "member-3");
@@ -229,17 +225,17 @@ public class ClusterAdminRpcServiceTest {
     public void testAddShardReplica() throws Exception {
         String name = "testAddShardReplica";
         String moduleShardsConfig = "module-shards-cars-member-1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
 
-        MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-2");
 
         doAddShardReplica(newReplicaNode2, "cars", "member-1");
 
-        MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        var newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-3");
@@ -251,18 +247,18 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
 
         // Write data to member-2's config datastore and read/verify via member-3
-        final NormalizedNode configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
+        final var configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
                 newReplicaNode3.configDataStore());
 
         // Write data to member-3's oper datastore and read/verify via member-2
         writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
 
         // Verify all data has been replicated. We expect 4 log entries and thus last applied index of 3 -
-        // 2 ServerConfigurationPayload entries,  the transaction payload entry plus a purge payload.
+        // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload.
 
         RaftStateVerifier verifier = raftState -> {
-            assertEquals("Commit index", 4, raftState.getCommitIndex());
-            assertEquals("Last applied index", 4, raftState.getLastApplied());
+            assertEquals("Commit index", 3, raftState.getCommitIndex());
+            assertEquals("Last applied index", 3, raftState.getLastApplied());
         };
 
         verifyRaftState(leaderNode1.configDataStore(), "cars", verifier);
@@ -289,34 +285,36 @@ public class ClusterAdminRpcServiceTest {
     @Test
     public void testAddShardReplicaFailures() throws Exception {
         String name = "testAddShardReplicaFailures";
-        MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig("module-shards-cars-member-1.conf").build();
 
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null, null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
-                .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+                .setDataStoreType(DataStoreType.Config)
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars")
-                .build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
         rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people")
-                .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+                .setDataStoreType(DataStoreType.Config)
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
     }
 
-    private static NormalizedNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
+    private static ContainerNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
             final AbstractDataStore readFromStore) throws Exception {
-        DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
-        NormalizedNode carsNode = CarsModel.create();
+        final var writeTx = writeToStore.newWriteOnlyTransaction();
+        final var carsNode = CarsModel.create();
         writeTx.write(CarsModel.BASE_PATH, carsNode);
 
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
-        assertEquals("canCommit", TRUE, canCommit);
+        final var cohort = writeTx.ready();
+        assertEquals("canCommit", TRUE, cohort.canCommit().get(7, TimeUnit.SECONDS));
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
 
@@ -325,31 +323,31 @@ public class ClusterAdminRpcServiceTest {
     }
 
     private static void readCarsNodeAndVerify(final AbstractDataStore readFromStore,
-            final NormalizedNode expCarsNode) throws Exception {
-        Optional<NormalizedNode> optional = readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH)
-                .get(15, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", expCarsNode, optional.get());
+            final ContainerNode expCarsNode) throws Exception {
+        assertEquals(Optional.of(expCarsNode),
+            readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS));
     }
 
     private static void doAddShardReplica(final MemberNode memberNode, final String shardName,
             final String... peerMemberNames) throws Exception {
         memberNode.waitForMembersUp(peerMemberNames);
 
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null, null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
-            .setShardName(shardName).setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+            .setShardName(shardName)
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
 
-        Optional<ActorRef> optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName);
-        assertFalse("Oper shard present", optional.isPresent());
+        assertEquals(Optional.empty(), memberNode.operDataStore().getActorUtils().findLocalShard(shardName));
 
-        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
-                .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+            .setShardName(shardName)
+            .setDataStoreType(DataStoreType.Operational)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
@@ -357,12 +355,12 @@ public class ClusterAdminRpcServiceTest {
 
     private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName,
             final String newLeader) throws Exception {
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null, null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        final RpcResult<MakeLeaderLocalOutput> rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
-                .setDataStoreType(DataStoreType.Config).setShardName(shardName).build())
-                .get(10, TimeUnit.SECONDS);
+        final var rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+            .setDataStoreType(DataStoreType.Config)
+            .setShardName(shardName)
+            .build()).get(10, TimeUnit.SECONDS);
 
         verifySuccessfulRpcResult(rpcResult);
 
@@ -372,8 +370,9 @@ public class ClusterAdminRpcServiceTest {
 
     private static <T> T verifySuccessfulRpcResult(final RpcResult<T> rpcResult) {
         if (!rpcResult.isSuccessful()) {
-            if (rpcResult.getErrors().size() > 0) {
-                RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+            final var errors = rpcResult.getErrors();
+            if (errors.size() > 0) {
+                final var error = errors.get(0);
                 throw new AssertionError("Rpc failed with error: " + error, error.getCause());
             }
 
@@ -385,8 +384,9 @@ public class ClusterAdminRpcServiceTest {
 
     private static void verifyFailedRpcResult(final RpcResult<?> rpcResult) {
         assertFalse("RpcResult", rpcResult.isSuccessful());
-        assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
-        RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+        final var errors = rpcResult.getErrors();
+        assertEquals("RpcResult errors size", 1, errors.size());
+        final var error = errors.get(0);
         assertNotNull("RpcResult error message null", error.getMessage());
     }
 
@@ -394,15 +394,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveShardReplica() throws Exception {
         String name = "testRemoveShardReplica";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -413,12 +413,13 @@ public class ClusterAdminRpcServiceTest {
 
        // Invoke RPC service on member-3 to remove its local shard
 
-        final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null, null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<RemoveShardReplicaOutput> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
-                .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
-                .get(10, TimeUnit.SECONDS);
+        var rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars").setMemberName("member-3")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
@@ -430,7 +431,7 @@ public class ClusterAdminRpcServiceTest {
         Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress());
         replicaNode2.cleanup();
 
-        MemberNode newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         newPeplicaNode2.configDataStore().waitTillReady();
@@ -438,11 +439,14 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-1 to remove member-2
 
-        final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null, null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
-                .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars")
+            .setMemberName("member-2")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars");
@@ -453,15 +457,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveShardLeaderReplica() throws Exception {
         String name = "testRemoveShardLeaderReplica";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -474,12 +478,14 @@ public class ClusterAdminRpcServiceTest {
 
        // Invoke RPC service on leader member-1 to remove its local shard
 
-        final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null, null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        RpcResult<RemoveShardReplicaOutput> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
-                .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
-                .get(10, TimeUnit.SECONDS);
+        final var rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars")
+            .setMemberName("member-1")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftState(replicaNode2.configDataStore(), "cars", raftState ->
@@ -495,17 +501,17 @@ public class ClusterAdminRpcServiceTest {
     public void testAddReplicasForAllShards() throws Exception {
         String name = "testAddReplicasForAllShards";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
 
-        ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(
-            XMLNamespace.of("pets-ns"), "pets-module", "pets", null, List.of(MEMBER_1));
+        final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+            null, List.of(MEMBER_1));
         leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
         leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets");
 
-        MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-2");
@@ -515,19 +521,18 @@ public class ClusterAdminRpcServiceTest {
                 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell(
-                new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module",
-                                                             "no-leader", null, List.of(MEMBER_1)),
-                                Shard.builder(), null),
-                                newReplicaNode2.kit().getRef());
+        newReplicaNode2.operDataStore().getActorUtils().getShardManager()
+            .tell(new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module",
+                "no-leader", null, List.of(MEMBER_1)),
+                Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
-                newReplicaNode2.operDataStore(), null, null);
+        final var service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
+            newReplicaNode2.operDataStore(), null);
 
-        RpcResult<AddReplicasForAllShardsOutput> rpcResult = service.addReplicasForAllShards(
-            new AddReplicasForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service.addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("pets", DataStoreType.Config),
@@ -546,15 +551,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveAllShardReplicas() throws Exception {
         String name = "testRemoveAllShardReplicas";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -562,8 +567,8 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
 
-        ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"),
-                "pets-module", "pets", null, List.of(MEMBER_1, MEMBER_2, MEMBER_3));
+        final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+            null, List.of(MEMBER_1, MEMBER_2, MEMBER_3));
         leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
@@ -580,12 +585,13 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3");
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
 
-        final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null, null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
-                new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
-        RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service3.removeAllShardReplicas(
+                new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("pets", DataStoreType.Config),
@@ -607,15 +613,15 @@ public class ClusterAdminRpcServiceTest {
     public void testChangeMemberVotingStatesForShard() throws Exception {
         String name = "testChangeMemberVotingStatusForShard";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -626,32 +632,31 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to change voting status
 
-        final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null, null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service3
-                .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
-                        .setShardName("cars").setDataStoreType(DataStoreType.Config)
-                        .setMemberVotingState(List.of(
-                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
-                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
-                        .build())
-                .get(10, TimeUnit.SECONDS);
+        var rpcResult = service3.changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
+            .setShardName("cars").setDataStoreType(DataStoreType.Config)
+            .setMemberVotingState(List.of(
+                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+            .build())
+            .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
-        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
-        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+        verifyVotingStates(replicaNode2.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+        verifyVotingStates(replicaNode3.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
     }
 
     @Test
     public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception {
         String name = "testChangeMemberVotingStatesForSingleNodeShard";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
@@ -660,36 +665,39 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to change voting status
 
-        final ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
-                leaderNode.operDataStore(), null, null);
-
-        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service
-                .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
-                        .setShardName("cars").setDataStoreType(DataStoreType.Config)
-                        .setMemberVotingState(List.of(new MemberVotingStateBuilder()
-                            .setMemberName("member-1")
-                            .setVoting(FALSE)
-                            .build()))
-                        .build())
-                .get(10, TimeUnit.SECONDS);
+        final var service = new ClusterAdminRpcService(leaderNode.configDataStore(), leaderNode.operDataStore(), null);
+
+        final var rpcResult = service.changeMemberVotingStatesForShard(
+            new ChangeMemberVotingStatesForShardInputBuilder()
+                .setShardName("cars").setDataStoreType(DataStoreType.Config)
+                .setMemberVotingState(List.of(new MemberVotingStateBuilder()
+                    .setMemberName("member-1")
+                    .setVoting(FALSE)
+                    .build()))
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE));
+        verifyVotingStates(leaderNode.configDataStore(), "cars", new ExpState("member-1", true));
     }
 
     @Test
     public void testChangeMemberVotingStatesForAllShards() throws Exception {
         String name = "testChangeMemberVotingStatesForAllShards";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
-                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
-                .build();
-
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes)
+            .akkaConfig("Member1")
+            .testName(name)
+            .moduleShardsConfig(moduleShardsConfig)
+            .datastoreContextBuilder(DatastoreContext.newBuilder()
+                .shardHeartbeatIntervalInMillis(300)
+                .shardElectionTimeoutFactor(1))
+            .build();
+
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -702,75 +710,78 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to change voting status
 
-        final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null, null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
+                replicaNode3.operDataStore(), null);
 
-        RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
-                new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(List.of(
+        final var rpcResult = service3.changeMemberVotingStatesForAllShards(
+            new ChangeMemberVotingStatesForAllShardsInputBuilder()
+                .setMemberVotingState(List.of(
                         new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
-                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build())
+                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+                .build())
                 .get(10, TimeUnit.SECONDS);
-        ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
     }
 
     @Test
     public void testFlipMemberVotingStates() throws Exception {
         String name = "testFlipMemberVotingStates";
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
-                new ServerInfo("member-1", true), new ServerInfo("member-2", true),
-                new ServerInfo("member-3", false)));
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
+            new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false)));
 
         setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
         setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder()
                         .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
         replicaNode3.configDataStore().waitTillReady();
         replicaNode3.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
 
-        final ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null, null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service3.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
-                new SimpleEntry<>("member-3", TRUE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", true));
 
         // Leadership should have transferred to member 3 since it is the only remaining voting member.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -788,19 +799,20 @@ public class ClusterAdminRpcServiceTest {
         // Flip the voting states back to the original states.
 
         rpcResult = service3.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
         result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
-                new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
 
         // Leadership should have transferred to member 1 or 2.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -816,7 +828,7 @@ public class ClusterAdminRpcServiceTest {
 
         // Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are initially
         // voting and simulated as down by not starting them up.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("member-1", false), new ServerInfo("member-2", false),
                 new ServerInfo("member-3", false), new ServerInfo("member-4", true),
                 new ServerInfo("member-5", true), new ServerInfo("member-6", true)));
@@ -826,47 +838,47 @@ public class ClusterAdminRpcServiceTest {
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         // Initially there won't be a leader b/c all the up nodes are non-voting.
 
         replicaNode1.waitForMembersUp("member-2", "member-3");
 
-        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE),
-                new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE),
-                new SimpleEntry<>("member-6", TRUE));
+        verifyVotingStates(replicaNode1.configDataStore(), "cars",
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+            new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
 
         verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
             assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
 
-        final ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
-                replicaNode1.operDataStore(), null, null);
+        final var service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
-                new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE),
-                new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+            new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
 
         // Since member 1 was changed to voting and there was no leader, it should've started an election
         // and become leader
@@ -888,7 +900,7 @@ public class ClusterAdminRpcServiceTest {
         String name = "testFlipMemberVotingStatesWithVotingMembersDown";
 
         // Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("member-1", true), new ServerInfo("member-2", true),
                 new ServerInfo("member-3", true), new ServerInfo("member-4", false),
                 new ServerInfo("member-5", false), new ServerInfo("member-6", false)));
@@ -898,43 +910,43 @@ public class ClusterAdminRpcServiceTest {
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE),
-                new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE),
-                new SimpleEntry<>("member-6", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+            new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
 
-        final ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null, null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
         // Members 2 and 3 are now non-voting but should get replicated with the new server config.
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
-                new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE),
-                new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+            new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
 
         // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
         // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
@@ -946,12 +958,12 @@ public class ClusterAdminRpcServiceTest {
 
     private static void setupPersistedServerConfigPayload(final ServerConfigurationPayload serverConfig,
             final String member, final String datastoreTypeSuffix, final String... shards) {
-        String[] datastoreTypes = {"config_", "oper_"};
+        String[] datastoreTypes = { "config_", "oper_" };
         for (String type : datastoreTypes) {
             for (String shard : shards) {
-                List<ServerInfo> newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size());
-                for (ServerInfo info : serverConfig.getServerConfig()) {
-                    newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()),
+                final var newServerInfo = new ArrayList<ServerInfo>(serverConfig.getServerConfig().size());
+                for (var info : serverConfig.getServerConfig()) {
+                    newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.peerId()),
                             type + datastoreTypeSuffix).toString(), info.isVoting()));
                 }
 
@@ -964,45 +976,43 @@ public class ClusterAdminRpcServiceTest {
         }
     }
 
-    @SafeVarargs
-    private static void verifyVotingStates(final AbstractDataStore[] datastores, final String[] shards,
-            final SimpleEntry<String, Boolean>... expStates) throws Exception {
-        for (AbstractDataStore datastore: datastores) {
-            for (String shard: shards) {
+    private static void verifyVotingStates(final ClientBackedDataStore[] datastores, final String[] shards,
+            final ExpState... expStates) throws Exception {
+        for (var datastore : datastores) {
+            for (String shard : shards) {
                 verifyVotingStates(datastore, shard, expStates);
             }
         }
     }
 
-    @SafeVarargs
-    private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName,
-            final SimpleEntry<String, Boolean>... expStates) throws Exception {
+    private static void verifyVotingStates(final ClientBackedDataStore datastore, final String shardName,
+            final ExpState... expStates) throws Exception {
         String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName();
-        Map<String, Boolean> expStateMap = new HashMap<>();
-        for (Entry<String, Boolean> e: expStates) {
-            expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
-                    datastore.getActorUtils().getDataStoreName()).toString(), e.getValue());
+        var expStateMap = new HashMap<String, Boolean>();
+        for (var expState : expStates) {
+            expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(expState.name),
+                datastore.getActorUtils().getDataStoreName()).toString(), expState.voting);
         }
 
         verifyRaftState(datastore, shardName, raftState -> {
             String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
                     datastore.getActorUtils().getDataStoreName()).toString();
             assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
-            for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
-                assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
+            for (var entry : raftState.getPeerVotingStates().entrySet()) {
+                assertEquals("Voting state for " + entry.getKey(), expStateMap.get(entry.getKey()), entry.getValue());
             }
         });
     }
 
     private static void verifyShardResults(final Map<ShardResultKey, ShardResult> shardResults,
             final ShardResult... expShardResults) {
-        Map<String, ShardResult> expResultsMap = new HashMap<>();
-        for (ShardResult r: expShardResults) {
+        var expResultsMap = new HashMap<String, ShardResult>();
+        for (var r : expShardResults) {
             expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
         }
 
-        for (ShardResult result: shardResults.values()) {
-            ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
+        for (var result : shardResults.values()) {
+            var exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
             assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
                     result.getDataStoreType()), exp);
             assertEquals("isSucceeded", exp.getSucceeded(), result.getSucceeded());
index d1a9fbd27ebbe0c8b5e7c31f4817c6f2a7efdcb6..481adfd32fe78047fca10d1a9ee2c923993341d0 100644 (file)
@@ -5,7 +5,7 @@
     <parent>
         <artifactId>mdsal-parent</artifactId>
         <groupId>org.opendaylight.controller</groupId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent/pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
index 6f7b8d4522947550109d9e78eaf6b50901b6ba47..ec23a081591ecd41a8f1f42e573ad78783f71e91 100644 (file)
@@ -11,20 +11,20 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
 @Command(scope = "cluster-admin", name = "activate-eos-datacenter", description = "Run an activate-eos-datacenter test")
 public class ActivateEosDatacenterCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .activateEosDatacenter(new ActivateEosDatacenterInputBuilder().build());
+        return rpcService.getRpc(ActivateEosDatacenter.class)
+            .invoke(new ActivateEosDatacenterInputBuilder().build());
     }
 }
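
The same migration pattern repeats in each Karaf command below: the RpcConsumerRegistry/ClusterAdminService pair is replaced by injecting RpcService and resolving the generated per-RPC interface. A condensed, hypothetical sketch of the pattern (FooBar stands in for any generated RPC class; imports match the commands above):

    // Hypothetical command illustrating the RpcService-based invocation pattern
    // used throughout this patch; FooBar/FooBarInputBuilder are placeholder names.
    @Service
    @Command(scope = "cluster-admin", name = "foo-bar", description = "Run a foo-bar test")
    public class FooBarCommand extends AbstractRpcAction {
        @Reference
        private RpcService rpcService;

        @Override
        protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
            return rpcService.getRpc(FooBar.class)
                .invoke(new FooBarInputBuilder().build());
        }
    }

Commands that take a data-store-type argument also drop the .orElse(null) unwrapping around DataStoreType.forName(...), consistent with the generated lookup now returning the value directly.
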
index 5189f08efc5d15ea1be1895366323e5af7102cf8..51f086b7229c767ca7c3b0c502683bbda45cb880 100644 (file)
@@ -11,9 +11,9 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
@@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run an add-replicas-for-all-shards test")
 public class AddReplicasForAllShardsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build());
+        return rpcService.getRpc(AddReplicasForAllShards.class)
+            .invoke(new AddReplicasForAllShardsInputBuilder().build());
     }
 }
index 77c039e9a39a312c92588c7413b0a15722295955..3639fbd5b6f67012e1299d6185488776e86ed332 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplica;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "cluster-admin", name = "add-shard-replica", description = "Run an add-shard-replica test")
 public class AddShardReplicaCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -30,10 +30,10 @@ public class AddShardReplicaCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .addShardReplica(new AddShardReplicaInputBuilder()
+        return rpcService.getRpc(AddShardReplica.class)
+                .invoke(new AddShardReplicaInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .build());
     }
 }
index 9574bc0edae948b9cd7b209c9901ae864a79143d..4f19d0f3a9ee2343c49fca98a828f55c02da58d5 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.Uint32;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.Uint32;
 @Command(scope = "cluster-admin", name = "backup-datastore", description = "Run a backup-datastore test")
 public class BackupDatastoreCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "file-path", required = true)
     private String filePath;
     @Argument(index = 1, name = "timeout", required = true)
@@ -30,8 +30,8 @@ public class BackupDatastoreCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .backupDatastore(new BackupDatastoreInputBuilder()
+        return rpcService.getRpc(BackupDatastore.class)
+                .invoke(new BackupDatastoreInputBuilder()
                         .setFilePath(filePath)
                         .setTimeout(Uint32.valueOf(timeout))
                         .build());
index d8fb0a423211cf5cf3001395838451c7408f6ea7..cef7e9d938914a66203b4c0e9acfed17670e4f14 100644 (file)
@@ -13,9 +13,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
@@ -26,7 +26,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a change-member-voting-states-for-all-shards test")
 public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "member-name", required = true)
     private String memberName;
     @Argument(index = 1, name = "voting", required = true)
@@ -39,8 +39,8 @@ public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcActi
                 .setVoting(voting)
                 .build();
 
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .changeMemberVotingStatesForAllShards(new ChangeMemberVotingStatesForAllShardsInputBuilder()
+        return rpcService.getRpc(ChangeMemberVotingStatesForAllShards.class)
+                .invoke(new ChangeMemberVotingStatesForAllShardsInputBuilder()
                         .setMemberVotingState(List.of(memberVotingState))
                         .build());
     }
index cbfd5704afd172de5968cef134c457ad739e78fa..e98a3090b61205924d01e920c24ec93b42dac533 100644 (file)
@@ -14,9 +14,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
@@ -27,7 +27,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a change-member-voting-states-for-shard test")
 public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -44,10 +44,10 @@ public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction {
                 .setVoting(voting)
                 .build();
 
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
+        return rpcService.getRpc(ChangeMemberVotingStatesForShard.class)
+                .invoke(new ChangeMemberVotingStatesForShardInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .setMemberVotingState(List.of(memberVotingState))
                         .build());
     }
index e0fd59ddf8f1e5d4fc6e4d1c0d9a6fb95d162c61..0ca0003c7e3520799286ec171249c74110a43108 100644 (file)
@@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a deactivate-eos-datacenter test")
 public class DeactivateEosDatacenterCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .deactivateEosDatacenter(new DeactivateEosDatacenterInputBuilder().build());
+        return rpcService.getRpc(DeactivateEosDatacenter.class)
+                .invoke(new DeactivateEosDatacenterInputBuilder().build());
     }
 }
index ac523ac2cdc13635c317cd8ede1682d5af72077f..78c0b6f809601f33a9e4a78fb52f0e1a3d062d5f 100644 (file)
@@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a flip-member-voting-states-for-all-shards test")
 public class FlipMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .flipMemberVotingStatesForAllShards(new FlipMemberVotingStatesForAllShardsInputBuilder().build());
+        return rpcService.getRpc(FlipMemberVotingStatesForAllShards.class)
+                .invoke(new FlipMemberVotingStatesForAllShardsInputBuilder().build());
     }
 }
index be55c006add0a02f822875afc50bb6ef8730819a..c0028466800f5f4b1d07ec5c7dab315d2f986c8c 100644 (file)
@@ -11,8 +11,8 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a get-known-clients-for-all-shards test")
 public class GetKnownClientsForAllShardsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .getKnownClientsForAllShards(new GetKnownClientsForAllShardsInputBuilder().build());
+        return rpcService.getRpc(GetKnownClientsForAllShards.class)
+                .invoke(new GetKnownClientsForAllShardsInputBuilder().build());
     }
 }
index 8e914279ea68ce2f9fd3d17f515b762d0bb5f099..50a88e942cfd9d399fb91bdc04fda3037fb4013d 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRole;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "cluster-admin", name = "get-shard-role", description = "Run a get-shard-role test")
 public class GetShardRoleCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -30,10 +30,10 @@ public class GetShardRoleCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .getShardRole(new GetShardRoleInputBuilder()
+        return rpcService.getRpc(GetShardRole.class)
+                .invoke(new GetShardRoleInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .build());
     }
 }
index 13d99a8e0385fd64cd4ee42bec89367199908c6d..6a1b1731a66f9d36991b22c11e05a0f2d196fbe4 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "cluster-admin", name = "locate-shard", description = "Run a locate-shard test")
 public class LocateShardCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -30,10 +30,10 @@ public class LocateShardCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .locateShard(new LocateShardInputBuilder()
+        return rpcService.getRpc(LocateShard.class)
+                .invoke(new LocateShardInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .build());
     }
 }
index 3b1c1453d06bcd5ddc8a9f19230f50f2fb395900..90aa8fc6538afbdae9094bbb87e0424103a83752 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocal;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "cluster-admin", name = "make-leader-local", description = "Run a make-leader-local test")
 public class MakeLeaderLocalCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -30,10 +30,10 @@ public class MakeLeaderLocalCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+        return rpcService.getRpc(MakeLeaderLocal.class)
+                .invoke(new MakeLeaderLocalInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .build());
     }
 }
index 97f3a79eb1b7ac594c3eca77b37e9b5ca297aa94..c33b66330d07f374f6844c26d3d6049178479e4c 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,14 +22,14 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run a remove-all-shard-replicas test")
 public class RemoveAllShardReplicasCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "member-name", required = true)
     private String memberName;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .removeAllShardReplicas(new RemoveAllShardReplicasInputBuilder()
+        return rpcService.getRpc(RemoveAllShardReplicas.class)
+                .invoke(new RemoveAllShardReplicasInputBuilder()
                         .setMemberName(memberName)
                         .build());
     }
index e4a786432b7af6195202ed1697af85e2118c10be..9738f4fb77afb0b5b6426fae7f00cd8cb853e748 100644 (file)
@@ -12,9 +12,9 @@ import org.apache.karaf.shell.api.action.Argument;
 import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplica;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,7 +22,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "cluster-admin", name = "remove-shard-replica", description = "Run a remove-shard-replica")
 public class RemoveShardReplicaCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcConsumerRegistry;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
     @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
@@ -32,10 +32,10 @@ public class RemoveShardReplicaCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcConsumerRegistry.getRpcService(ClusterAdminService.class)
-                .removeShardReplica(new RemoveShardReplicaInputBuilder()
+        return rpcService.getRpc(RemoveShardReplica.class)
+                .invoke(new RemoveShardReplicaInputBuilder()
                         .setShardName(shardName)
-                        .setDataStoreType(DataStoreType.forName(dataStoreType).orElse(null))
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
                         .setMemberName(memberName)
                         .build());
     }
index 3a62b14b6f93c9701efa5198c1ce7ad91982872b..14056f32ca4989be1886020ad148e46cef208da1 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <packaging>bundle</packaging>
 
   <dependencies>
-    <!-- Java -->
     <dependency>
-      <groupId>org.xmlunit</groupId>
-      <artifactId>xmlunit-legacy</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
-    </dependency>
-
-    <!-- Apache -->
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-      <scope>test</scope>
+      <!-- Enforce Netty’s optional dependency on servlet API -->
+      <!-- FIXME: is this really needed ? -->
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
     </dependency>
 
-    <!-- Akka -->
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>repackaged-akka</artifactId>
-    </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.13</artifactId>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence-tck_2.13</artifactId>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.scalatestplus</groupId>
-      <artifactId>junit-4-13_2.13</artifactId>
-      <scope>test</scope>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <scope>provided</scope>
+      <optional>true</optional>
     </dependency>
-
     <dependency>
-      <!-- Enforce Netty’s optional dependency on servlet API -->
-      <!-- FIXME: is this really needed ? -->
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
+      <groupId>com.typesafe</groupId>
+      <artifactId>config</artifactId>
     </dependency>
-
-    <!-- Codahale -->
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
       <artifactId>metrics-jmx</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.guicedee.services</groupId>
-      <artifactId>javax.inject</artifactId>
-      <scope>provided</scope>
+      <groupId>org.checkerframework</groupId>
+      <artifactId>checker-qual</artifactId>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.kohsuke.metainf-services</groupId>
       <artifactId>metainf-services</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
+      <groupId>org.lz4</groupId>
+      <artifactId>lz4-java</artifactId>
+      <version>1.8.0</version>
     </dependency>
-
-    <!-- Google -->
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava-testlib</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
     </dependency>
-
-    <!-- Scala -->
     <dependency>
-      <groupId>org.scala-lang</groupId>
-      <artifactId>scala-library</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
     </dependency>
-
-    <!-- OpenDaylight -->
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>util</artifactId>
+      <artifactId>yang-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-api</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-binfmt</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-impl</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-util</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-model-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-codec-binfmt</artifactId>
+      <artifactId>yang-repo-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-repo-spi</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>repackaged-akka</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.scala-lang</groupId>
+      <artifactId>scala-library</artifactId>
+    </dependency>
 
-    <!-- Compression -->
     <dependency>
-      <groupId>org.lz4</groupId>
-      <artifactId>lz4-java</artifactId>
-      <version>1.8.0</version>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava-testlib</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-persistence-tck_2.13</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_2.13</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-test-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatestplus</groupId>
+      <artifactId>junit-4-13_2.13</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.xmlunit</groupId>
+      <artifactId>xmlunit-core</artifactId>
     </dependency>
   </dependencies>
 
index c655dcdb891488b52f1f42741046594e9651a5e6..44afa634ccfc0b812761e9bb9fe97c6be9061900 100644 (file)
@@ -9,7 +9,10 @@
 package org.opendaylight.controller.cluster;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
@@ -70,4 +73,20 @@ public interface DataPersistenceProvider {
      * @return the last sequence number
      */
     long getLastSequenceNumber();
+
+    /**
+     * Receive and potentially handle a {@link JournalProtocol} response.
+     *
+     * @param response A {@link JournalProtocol} response
+     * @return {@code true} if the response was handled
+     */
+    boolean handleJournalResponse(JournalProtocol.@NonNull Response response);
+
+    /**
+     * Receive and potentially handle a {@link SnapshotProtocol} response.
+     *
+     * @param response A {@link SnapshotProtocol} response
+     * @return {@code true} if the response was handled
+     */
+    boolean handleSnapshotResponse(SnapshotProtocol.@NonNull Response response);
 }
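
A minimal, hypothetical sketch (not part of this patch) of how an actor owning a DataPersistenceProvider could route akka-persistence protocol replies through the two new hooks; the ResponseDispatcher name and shape are illustrative only, the method signatures are the ones added above.

    import akka.persistence.JournalProtocol;
    import akka.persistence.SnapshotProtocol;
    import org.opendaylight.controller.cluster.DataPersistenceProvider;

    final class ResponseDispatcher {
        private final DataPersistenceProvider persistence;

        ResponseDispatcher(final DataPersistenceProvider persistence) {
            this.persistence = persistence;
        }

        // Returns true when the provider consumed the reply, so the caller can skip
        // its normal unhandled-message path.
        boolean dispatch(final Object message) {
            if (message instanceof JournalProtocol.Response journalResponse) {
                return persistence.handleJournalResponse(journalResponse);
            }
            if (message instanceof SnapshotProtocol.Response snapshotResponse) {
                return persistence.handleSnapshotResponse(snapshotResponse);
            }
            return false;
        }
    }
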
index f1a20fcc8e54f4e2f4908a8ec032477a6bd89b0f..3210819225b11e7b349772b8fc6a2735800a5bee 100644 (file)
@@ -8,6 +8,8 @@
 package org.opendaylight.controller.cluster;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
 
 /**
@@ -18,11 +20,11 @@ import akka.persistence.SnapshotSelectionCriteria;
 public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
     private DataPersistenceProvider delegate;
 
-    public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+    public DelegatingPersistentDataProvider(final DataPersistenceProvider delegate) {
         this.delegate = delegate;
     }
 
-    public void setDelegate(DataPersistenceProvider delegate) {
+    public void setDelegate(final DataPersistenceProvider delegate) {
         this.delegate = delegate;
     }
 
@@ -36,27 +38,27 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider
     }
 
     @Override
-    public <T> void persist(T entry, Procedure<T> procedure) {
+    public <T> void persist(final T entry, final Procedure<T> procedure) {
         delegate.persist(entry, procedure);
     }
 
     @Override
-    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
         delegate.persistAsync(entry, procedure);
     }
 
     @Override
-    public void saveSnapshot(Object entry) {
+    public void saveSnapshot(final Object entry) {
         delegate.saveSnapshot(entry);
     }
 
     @Override
-    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
         delegate.deleteSnapshots(criteria);
     }
 
     @Override
-    public void deleteMessages(long sequenceNumber) {
+    public void deleteMessages(final long sequenceNumber) {
         delegate.deleteMessages(sequenceNumber);
     }
 
@@ -64,4 +66,14 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider
     public long getLastSequenceNumber() {
         return delegate.getLastSequenceNumber();
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return delegate.handleJournalResponse(response);
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return delegate.handleSnapshotResponse(response);
+    }
 }
index 9a4a34cf596d00dbf1d1eca8dd1973b88f3d236d..5461689d2aebc84739b165657db6330e5feba59f 100644 (file)
@@ -10,6 +10,8 @@ package org.opendaylight.controller.cluster;
 import static java.util.Objects.requireNonNull;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
 import org.opendaylight.controller.cluster.common.actor.ExecuteInSelfActor;
 import org.slf4j.Logger;
@@ -70,4 +72,14 @@ public class NonPersistentDataProvider implements DataPersistenceProvider {
             LOG.error("An unexpected error occurred", e);
         }
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return false;
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return false;
+    }
 }
index 21102f1f0e368a504bef81e526762ebd79464672..1faee47f526ac119d384ab41a4b29362a236c89f 100644 (file)
@@ -11,16 +11,19 @@ import static java.util.Objects.requireNonNull;
 
 import akka.japi.Procedure;
 import akka.persistence.AbstractPersistentActor;
+import akka.persistence.DeleteMessagesSuccess;
+import akka.persistence.DeleteSnapshotsSuccess;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
 
 /**
  * A DataPersistenceProvider implementation with persistence enabled.
  */
 public class PersistentDataProvider implements DataPersistenceProvider {
-
     private final AbstractPersistentActor persistentActor;
 
-    public PersistentDataProvider(AbstractPersistentActor persistentActor) {
+    public PersistentDataProvider(final AbstractPersistentActor persistentActor) {
         this.persistentActor = requireNonNull(persistentActor, "persistentActor can't be null");
     }
 
@@ -30,27 +33,27 @@ public class PersistentDataProvider implements DataPersistenceProvider {
     }
 
     @Override
-    public <T> void persist(T entry, Procedure<T> procedure) {
+    public <T> void persist(final T entry, final Procedure<T> procedure) {
         persistentActor.persist(entry, procedure);
     }
 
     @Override
-    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
         persistentActor.persistAsync(entry, procedure);
     }
 
     @Override
-    public void saveSnapshot(Object snapshot) {
+    public void saveSnapshot(final Object snapshot) {
         persistentActor.saveSnapshot(snapshot);
     }
 
     @Override
-    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
         persistentActor.deleteSnapshots(criteria);
     }
 
     @Override
-    public void deleteMessages(long sequenceNumber) {
+    public void deleteMessages(final long sequenceNumber) {
         persistentActor.deleteMessages(sequenceNumber);
     }
 
@@ -58,4 +61,14 @@ public class PersistentDataProvider implements DataPersistenceProvider {
     public long getLastSequenceNumber() {
         return persistentActor.lastSequenceNr();
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return response instanceof DeleteMessagesSuccess;
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return response instanceof DeleteSnapshotsSuccess;
+    }
 }
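
A hypothetical JUnit/Mockito sketch (not in this patch) of the contract above: PersistentDataProvider acknowledges only the success replies produced by its own deleteMessages()/deleteSnapshots() calls, while NonPersistentDataProvider always reports the reply as unhandled. Mocking AbstractPersistentActor and placing the test alongside PersistentDataProvider are assumptions made purely for illustration.

    import static org.junit.Assert.assertTrue;
    import static org.mockito.Mockito.mock;

    import akka.persistence.AbstractPersistentActor;
    import akka.persistence.DeleteMessagesSuccess;
    import org.junit.Test;

    public class PersistentDataProviderResponseTest {
        @Test
        public void deleteMessagesSuccessIsAcknowledged() {
            final var provider = new PersistentDataProvider(mock(AbstractPersistentActor.class));
            // DeleteMessagesSuccess is a JournalProtocol.Response emitted after deleteMessages().
            assertTrue(provider.handleJournalResponse(new DeleteMessagesSuccess(1L)));
        }
    }
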
index 6af52fbd04e2af4a60ce5865e46a0b51570347bd..f66a77f66eeee8c4844c42e4406f330e05634a18 100644 (file)
@@ -17,10 +17,11 @@ import org.slf4j.LoggerFactory;
 
 public abstract class AbstractUntypedActor extends AbstractActor implements ExecuteInSelfActor {
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
+    @SuppressFBWarnings(value = "SLF4J_LOGGER_SHOULD_BE_PRIVATE", justification = "Class identity is required")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected AbstractUntypedActor() {
         LOG.debug("Actor created {}", getSelf());
         getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
index 2124b24faf29ac92859b50834c8cbbe654a84bc3..d20ceb525224a2e91844d4e1be7f5dab140adcd6 100644 (file)
@@ -7,21 +7,24 @@
  */
 package org.opendaylight.controller.cluster.common.actor;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 /**
  * Actor with its behaviour metered. Metering is enabled by configuration.
  */
 public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
-
-    //this is used in the metric name. Some transient actors do not have defined names
+    // this is used in the metric name. Some transient actors do not have defined names
     private String actorNameOverride;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     public AbstractUntypedActorWithMetering() {
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
         }
     }
 
-    public AbstractUntypedActorWithMetering(String actorNameOverride) {
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
+    public AbstractUntypedActorWithMetering(final String actorNameOverride) {
         this.actorNameOverride = actorNameOverride;
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
@@ -29,8 +32,7 @@ public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedAc
     }
 
     private boolean isMetricsCaptureEnabled() {
-        CommonConfig config = new CommonConfig(getContext().system().settings().config());
-        return config.isMetricCaptureEnabled();
+        return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
     }
 
     public String getActorNameOverride() {
index 711a43159a376a3c289271cab06cedd9409d9c06..8bf657e134939dea8b9bb3284149ffd4e314bfdf 100644 (file)
@@ -14,13 +14,16 @@ import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+// FIXME: override getContext(), getSelf() and others to be final to get rid of
+//        SpotBugs MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR violation
 public abstract class AbstractUntypedPersistentActor extends AbstractPersistentActor implements ExecuteInSelfActor {
 
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected AbstractUntypedPersistentActor() {
         LOG.trace("Actor created {}", getSelf());
         getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
index ed03d334919ed7b2422a64802cf8dceef0cb52b3..760f0bd0fd7f92c321c6b26872022c51577051d6 100644 (file)
@@ -7,11 +7,13 @@
  */
 package org.opendaylight.controller.cluster.common.actor;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 /**
  * Actor with its behaviour metered. Metering is enabled by configuration.
  */
 public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor {
-
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     public AbstractUntypedPersistentActorWithMetering() {
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
@@ -19,7 +21,6 @@ public abstract class AbstractUntypedPersistentActorWithMetering extends Abstrac
     }
 
     private boolean isMetricsCaptureEnabled() {
-        CommonConfig config = new CommonConfig(getContext().system().settings().config());
-        return config.isMetricCaptureEnabled();
+        return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
     }
 }
index 77dcba564decdfc1a031a688f19001fb7c59d9f3..c5c19d8d37ebf81300e7021d84c1784064d12116 100644 (file)
@@ -17,6 +17,7 @@ import akka.japi.Effect;
 import akka.remote.AssociationErrorEvent;
 import akka.remote.RemotingLifecycleEvent;
 import akka.remote.artery.ThisActorSystemQuarantinedEvent;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashSet;
 import java.util.Set;
 import org.slf4j.Logger;
@@ -42,6 +43,7 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor {
     private final Set<Address> addressSet = new HashSet<>();
     private int count = 0;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected QuarantinedMonitorActor(final Effect callback) {
         this.callback = callback;
 
@@ -66,25 +68,23 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor {
             return;
         }
 
-        if (message instanceof ThisActorSystemQuarantinedEvent) {
-            final ThisActorSystemQuarantinedEvent event = (ThisActorSystemQuarantinedEvent) message;
+        if (message instanceof ThisActorSystemQuarantinedEvent event) {
             LOG.warn("Got quarantined by {}", event.remoteAddress());
             quarantined = true;
 
             // execute the callback
             callback.apply();
-        } else  if (message instanceof AssociationErrorEvent) {
+        } else if (message instanceof AssociationErrorEvent event) {
             final String errorMessage = message.toString();
             LOG.trace("errorMessage:{}", errorMessage);
             if (errorMessage.contains("The remote system has a UID that has been quarantined")) {
-                final Address address = ((AssociationErrorEvent) message).getRemoteAddress();
+                final Address address = event.getRemoteAddress();
                 addressSet.add(address);
                 count++;
                 LOG.trace("address:{} addressSet: {} count:{}", address, addressSet, count);
                 if (count >= MESSAGE_THRESHOLD && addressSet.size() > 1) {
                     count = 0;
                     addressSet.clear();
-                    final AssociationErrorEvent event = (AssociationErrorEvent) message;
                     LOG.warn("Got quarantined via AssociationEvent by {}", event.remoteAddress());
                     quarantined = true;
 
@@ -95,8 +95,7 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor {
                 count = 0;
                 addressSet.clear();
             }
-        } else if (message instanceof ClusterEvent.MemberDowned) {
-            final ClusterEvent.MemberDowned event = (ClusterEvent.MemberDowned) message;
+        } else if (message instanceof ClusterEvent.MemberDowned event) {
             if (Cluster.get(getContext().system()).selfMember().equals(event.member())) {
                 LOG.warn("This member has been downed, restarting");
 
index 6e567fa245bf7838bf095ac30288f909e62971a5..19df464f9e132c02a17e75294be460bad812887b 100644 (file)
@@ -35,38 +35,35 @@ public class NormalizedNodeNavigator {
             final DataContainerNode dataContainerNode) {
         visitor.visitNode(level, parentPath, dataContainerNode);
 
-        String newParentPath = parentPath + "/" + dataContainerNode.getIdentifier().toString();
+        String newParentPath = parentPath + "/" + dataContainerNode.name().toString();
 
-        for (NormalizedNode node : dataContainerNode.body()) {
-            if (node instanceof MixinNode && node instanceof NormalizedNodeContainer) {
-                navigateNormalizedNodeContainerMixin(level, newParentPath, (NormalizedNodeContainer<?>) node);
+        for (var node : dataContainerNode.body()) {
+            if (node instanceof MixinNode && node instanceof NormalizedNodeContainer<?> container) {
+                navigateNormalizedNodeContainerMixin(level, newParentPath, container);
             } else {
                 navigateNormalizedNode(level, newParentPath, node);
             }
         }
-
     }
 
     private void navigateNormalizedNodeContainerMixin(final int level, final String parentPath,
             final NormalizedNodeContainer<?> node) {
         visitor.visitNode(level, parentPath, node);
 
-        String newParentPath = parentPath + "/" + node.getIdentifier().toString();
+        String newParentPath = parentPath + "/" + node.name().toString();
 
-        for (NormalizedNode normalizedNode : node.body()) {
-            if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer) {
-                navigateNormalizedNodeContainerMixin(level + 1, newParentPath,
-                        (NormalizedNodeContainer<?>) normalizedNode);
+        for (var normalizedNode : node.body()) {
+            if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer<?> container) {
+                navigateNormalizedNodeContainerMixin(level + 1, newParentPath, container);
             } else {
                 navigateNormalizedNode(level, newParentPath, normalizedNode);
             }
         }
-
     }
 
     private void navigateNormalizedNode(final int level, final String parentPath, final NormalizedNode normalizedNode) {
-        if (normalizedNode instanceof DataContainerNode) {
-            navigateDataContainerNode(level + 1, parentPath, (DataContainerNode) normalizedNode);
+        if (normalizedNode instanceof DataContainerNode dataContainer) {
+            navigateDataContainerNode(level + 1, parentPath, dataContainer);
         } else {
             visitor.visitNode(level + 1, parentPath, normalizedNode);
         }
index a2c456d17a65efb3fa5c2007379f75a1219dcc40..51e61ea47f3a75357f0bf3d56053353726602906 100644 (file)
@@ -7,8 +7,6 @@
  */
 package org.opendaylight.controller.cluster.datastore.node.utils.stream;
 
-import static org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion.MAGNESIUM;
-
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -45,7 +43,7 @@ public final class SerializationUtils {
 
     public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode node)
             throws IOException {
-        writeNormalizedNode(out, MAGNESIUM, node);
+        writeNormalizedNode(out, NormalizedNodeStreamVersion.POTASSIUM, node);
     }
 
     public static void writeNormalizedNode(final DataOutput out, final NormalizedNodeStreamVersion version,
@@ -67,7 +65,7 @@ public final class SerializationUtils {
 
     public static void writePath(final DataOutput out, final @NonNull YangInstanceIdentifier path)
             throws IOException {
-        writePath(out, MAGNESIUM, path);
+        writePath(out, NormalizedNodeStreamVersion.POTASSIUM, path);
     }
 
     public static void writePath(final DataOutput out, final NormalizedNodeStreamVersion version,
@@ -95,7 +93,7 @@ public final class SerializationUtils {
 
     public static void writeNodeAndPath(final DataOutput out, final YangInstanceIdentifier path,
             final NormalizedNode node) throws IOException {
-        writeNodeAndPath(out, MAGNESIUM, path, node);
+        writeNodeAndPath(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
     }
 
     public static <T> void readPathAndNode(final DataInput in, final T instance, final Applier<T> applier)
@@ -117,6 +115,6 @@ public final class SerializationUtils {
 
     public static void writePathAndNode(final DataOutput out, final YangInstanceIdentifier path,
             final NormalizedNode node) throws IOException {
-        writePathAndNode(out, MAGNESIUM, path, node);
+        writePathAndNode(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
     }
 }
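
The zero-argument writeNormalizedNode()/writePath()/writeNodeAndPath()/writePathAndNode() overloads now default to NormalizedNodeStreamVersion.POTASSIUM rather than MAGNESIUM. A small, hypothetical sketch of a caller that pins the version explicitly at the call site (class name and the use of the root path are illustrative only):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;

    final class PathWriter {
        static byte[] writeRootPath() throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // Explicit version pin; omitting the version argument now selects POTASSIUM.
                SerializationUtils.writePath(out, NormalizedNodeStreamVersion.POTASSIUM, YangInstanceIdentifier.of());
            }
            return bytes.toByteArray();
        }
    }
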
index 13500c17c0fe964d75870c1c2783a6f6af91f67e..cee5a0329a75b73eb36947b547bd6c65e6dbfe9f 100644 (file)
@@ -18,7 +18,6 @@ import java.util.NoSuchElementException;
 import java.util.Optional;
 import javax.xml.transform.dom.DOMSource;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
@@ -26,7 +25,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgum
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
 import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
 import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
@@ -58,12 +57,12 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodePruner.class);
 
-    private final Deque<DataSchemaContextNode<?>> stack = new ArrayDeque<>();
+    private final Deque<DataSchemaContext> stack = new ArrayDeque<>();
     private final ReusableImmutableNormalizedNodeStreamWriter delegate =
             ReusableImmutableNormalizedNodeStreamWriter.create();
     private final DataSchemaContextTree tree;
 
-    private DataSchemaContextNode<?> nodePathSchemaNode;
+    private DataSchemaContext nodePathSchemaNode;
     private NormalizedNode normalizedNode;
     private State state = State.UNITIALIZED;
     private int unknown;
@@ -145,11 +144,6 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
         enter(ReusableImmutableNormalizedNodeStreamWriter::startChoiceNode, name, childSizeHint);
     }
 
-    @Override
-    public final void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException {
-        enter(ReusableImmutableNormalizedNodeStreamWriter::startAugmentationNode, identifier);
-    }
-
     @Override
     public final  boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
         if (enter(name)) {
@@ -181,7 +175,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
         }
     }
 
-    Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
+    Object translateScalar(final DataSchemaContext context, final Object value) {
         // Default is pass-through
         return value;
     }
@@ -206,7 +200,8 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
         }
 
         if (stack.isEmpty()) {
-            normalizedNode = delegate.getResult();
+            final var result = delegate.result();
+            normalizedNode = result != null ? result.data() : null;
             state = State.CLOSED;
         }
     }
@@ -247,10 +242,11 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
             return false;
         }
 
-        final DataSchemaContextNode<?> schema;
-        final DataSchemaContextNode<?> parent = currentSchema();
+        final DataSchemaContext schema;
+        final DataSchemaContext parent = currentSchema();
         if (parent != null) {
-            schema = parent.getChild(name);
+            schema = parent instanceof DataSchemaContext.Composite compositeParent ? compositeParent.childByArg(name)
+                : null;
         } else {
             schema = nodePathSchemaNode;
         }
@@ -262,7 +258,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
         }
 
         stack.push(schema);
-        final DataSchemaNode dataSchema = schema.getDataSchemaNode();
+        final DataSchemaNode dataSchema = schema.dataSchemaNode();
         if (dataSchema != null) {
             delegate.nextDataSchemaNode(dataSchema);
         }
@@ -282,7 +278,7 @@ abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWrite
         }
     }
 
-    final DataSchemaContextNode<?> currentSchema() {
+    final DataSchemaContext currentSchema() {
         return stack.peek();
     }
 }
index b3e02a4761c3cd323d2158ea370d6c5da5cc8f9b..4c6c2233964d84bfd4d55bf0d43aa3255b266dff 100644 (file)
@@ -30,7 +30,7 @@ import org.opendaylight.yangtools.yang.common.Uint8;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
 import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
 import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
@@ -54,9 +54,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
         UINT8 {
             @Override
             public Object apply(final Object obj) {
-                if (obj instanceof Short) {
+                if (obj instanceof Short shortObj) {
                     LOG.trace("Translating legacy uint8 {}", obj);
-                    return Uint8.valueOf((Short) obj);
+                    return Uint8.valueOf(shortObj);
                 }
                 return obj;
             }
@@ -64,9 +64,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
         UINT16 {
             @Override
             public Object apply(final Object obj) {
-                if (obj instanceof Integer) {
+                if (obj instanceof Integer intObj) {
                     LOG.trace("Translating legacy uint16 {}", obj);
-                    return Uint16.valueOf((Integer) obj);
+                    return Uint16.valueOf(intObj);
                 }
                 return obj;
             }
@@ -74,9 +74,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
         UINT32 {
             @Override
             public Object apply(final Object obj) {
-                if (obj instanceof Long) {
+                if (obj instanceof Long longObj) {
                     LOG.trace("Translating legacy uint32 {}", obj);
-                    return Uint32.valueOf((Long) obj);
+                    return Uint32.valueOf(longObj);
                 }
                 return obj;
             }
@@ -84,9 +84,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
         UINT64 {
             @Override
             public Object apply(final Object obj) {
-                if (obj instanceof BigInteger) {
+                if (obj instanceof BigInteger bigInt) {
                     LOG.trace("Translating legacy uint64 {}", obj);
-                    return Uint64.valueOf((BigInteger) obj);
+                    return Uint64.valueOf(bigInt);
                 }
                 return obj;
             }
@@ -133,18 +133,17 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
     }
 
     @Override
-    Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
-        final DataSchemaNode schema = context.getDataSchemaNode();
-        return schema instanceof TypedDataSchemaNode ? adaptValue(((TypedDataSchemaNode) schema).getType(), value)
-                : value;
+    Object translateScalar(final DataSchemaContext context, final Object value) {
+        final DataSchemaNode schema = context.dataSchemaNode();
+        return schema instanceof TypedDataSchemaNode typed ? adaptValue(typed.getType(), value) : value;
     }
 
     private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer, final NodeWithValue<?> name) {
         final NodeWithValue<?> adapted;
-        final DataSchemaNode schema = currentSchema().getDataSchemaNode();
-        if (schema instanceof TypedDataSchemaNode) {
+        final DataSchemaNode schema = currentSchema().dataSchemaNode();
+        if (schema instanceof TypedDataSchemaNode typed) {
             final Object oldValue = name.getValue();
-            final Object newValue = adaptValue(((TypedDataSchemaNode) schema).getType(), oldValue);
+            final Object newValue = adaptValue(typed.getType(), oldValue);
             adapted = newValue == oldValue ? name : new NodeWithValue<>(name.getNodeType(), newValue);
         } else {
             adapted = name;
@@ -156,9 +155,9 @@ final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
     private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer,
             final NodeIdentifierWithPredicates name, final int size) {
         final NodeIdentifierWithPredicates adapted;
-        final DataSchemaNode schema = currentSchema().getDataSchemaNode();
-        if (schema instanceof ListSchemaNode) {
-            adapted = NIP_ADAPTERS.getUnchecked((ListSchemaNode) schema).apply(name);
+        final DataSchemaNode schema = currentSchema().dataSchemaNode();
+        if (schema instanceof ListSchemaNode list) {
+            adapted = NIP_ADAPTERS.getUnchecked(list).apply(name);
         } else {
             adapted = name;
         }
index 35ab00f4f5b12ebbd63929dce66eff4918472054..3e299e3d9b625825b02ba8fc09106384b2f17ad0 100644 (file)
@@ -16,7 +16,7 @@ import java.util.Optional;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
 
 /**
  * Abstract {@link DataTreeModificationCursor} which tracks the current path. Subclasses can get the current path
@@ -26,7 +26,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
  */
 @Beta
 public abstract class AbstractDataTreeModificationCursor implements DataTreeModificationCursor {
-    private YangInstanceIdentifier current = YangInstanceIdentifier.empty();
+    private YangInstanceIdentifier current = YangInstanceIdentifier.of();
 
     protected final YangInstanceIdentifier current() {
         return current;
index 73cdece20fc25da023df2250a795adb01b4a6f67..1376c6771490aeaec4adbd2085ad5b005cb92301 100644 (file)
@@ -126,14 +126,14 @@ public final class ChunkedOutputStream extends OutputStream {
 
     public Either<byte[], ChunkedByteArray> toVariant() {
         checkClosed();
-        return result instanceof byte[] ? Either.ofFirst((byte[]) result)
+        return result instanceof byte[] bytes ? Either.ofFirst(bytes)
                 : Either.ofSecond(new ChunkedByteArray(size, (ImmutableList<byte[]>) result));
     }
 
     @VisibleForTesting
     ChunkedByteArray toChunkedByteArray() {
         checkClosed();
-        return new ChunkedByteArray(size, result instanceof byte[] ? ImmutableList.of((byte[]) result)
+        return new ChunkedByteArray(size, result instanceof byte[] bytes ? ImmutableList.of(bytes)
             : (ImmutableList<byte[]>) result);
     }
 
index 029464a82b0c83b2a89d93e4bf252a6fd5d3adb0..b00e4bee4e31a9853d574cf6d85fc260bf606922 100644 (file)
@@ -91,7 +91,7 @@ public class FileBackedOutputStream extends OutputStream {
                         if (file != null) {
                             return Files.newInputStream(file.toPath());
                         } else {
-                            return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount());
+                            return new ByteArrayInputStream(memory.buf(), 0, memory.count());
                         }
                     }
                 }
@@ -178,20 +178,20 @@ public class FileBackedOutputStream extends OutputStream {
             throw new IOException("Stream already closed");
         }
 
-        if (file == null && memory.getCount() + len > fileThreshold) {
+        if (file == null && memory.count() + len > fileThreshold) {
             final File temp = File.createTempFile("FileBackedOutputStream", null,
                     fileDirectory == null ? null : new File(fileDirectory));
             temp.deleteOnExit();
             final Cleaner.Cleanable cleanup = FILE_CLEANER.register(this, () -> deleteFile(temp));
 
-            LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.getCount() + len,
+            LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.count() + len,
                     fileThreshold, temp);
 
             final OutputStream transfer;
             try {
                 transfer = Files.newOutputStream(temp.toPath());
                 try {
-                    transfer.write(memory.getBuffer(), 0, memory.getCount());
+                    transfer.write(memory.buf(), 0, memory.count());
                     transfer.flush();
                 } catch (IOException e) {
                     try {
@@ -224,12 +224,12 @@ public class FileBackedOutputStream extends OutputStream {
     /**
      * ByteArrayOutputStream that exposes its internals for efficiency.
      */
-    private static class MemoryOutputStream extends ByteArrayOutputStream {
-        byte[] getBuffer() {
+    private static final class MemoryOutputStream extends ByteArrayOutputStream {
+        byte[] buf() {
             return buf;
         }
 
-        int getCount() {
+        int count() {
             return count;
         }
     }
index 297186d9f7c6bd9f13f8c6bea68ae33274931836..2b41bc595fad01f98f0969425fa11830316714f6 100644 (file)
@@ -42,11 +42,11 @@ public final  class MessageAssembler implements AutoCloseable {
     private final String logContext;
 
     MessageAssembler(final Builder builder) {
-        this.fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
+        fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
                 "FiledBackedStreamFactory cannot be null");
-        this.assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
+        assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
                 "assembledMessageCallback cannot be null");
-        this.logContext = builder.logContext;
+        logContext = builder.logContext;
 
         stateCache = CacheBuilder.newBuilder()
                 .expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit)
@@ -97,13 +97,13 @@ public final  class MessageAssembler implements AutoCloseable {
      * @return true if the message was handled, false otherwise
      */
     public boolean handleMessage(final Object message, final @NonNull ActorRef sendTo) {
-        if (message instanceof MessageSlice) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            onMessageSlice((MessageSlice) message, sendTo);
+        if (message instanceof MessageSlice messageSlice) {
+            LOG.debug("{}: handleMessage: {}", logContext, messageSlice);
+            onMessageSlice(messageSlice, sendTo);
             return true;
-        } else if (message instanceof AbortSlicing) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            onAbortSlicing((AbortSlicing) message);
+        } else if (message instanceof AbortSlicing abortSlicing) {
+            LOG.debug("{}: handleMessage: {}", logContext, abortSlicing);
+            onAbortSlicing(abortSlicing);
             return true;
         }
 
@@ -116,14 +116,9 @@ public final  class MessageAssembler implements AutoCloseable {
             final AssembledMessageState state = stateCache.get(identifier, () -> createState(messageSlice));
             processMessageSliceForState(messageSlice, state, sendTo);
         } catch (ExecutionException e) {
-            final MessageSliceException messageSliceEx;
             final Throwable cause = e.getCause();
-            if (cause instanceof MessageSliceException) {
-                messageSliceEx = (MessageSliceException) cause;
-            } else {
-                messageSliceEx = new MessageSliceException(String.format(
-                        "Error creating state for identifier %s", identifier), cause);
-            }
+            final MessageSliceException messageSliceEx = cause instanceof MessageSliceException sliceEx ? sliceEx
+                : new MessageSliceException(String.format("Error creating state for identifier %s", identifier), cause);
 
             messageSlice.getReplyTo().tell(MessageSliceReply.failed(identifier, messageSliceEx, sendTo),
                     ActorRef.noSender());
@@ -231,7 +226,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
-            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
             return this;
         }
 
@@ -243,7 +238,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder assembledMessageCallback(final BiConsumer<Object, ActorRef> newAssembledMessageCallback) {
-            this.assembledMessageCallback = newAssembledMessageCallback;
+            assembledMessageCallback = newAssembledMessageCallback;
             return this;
         }
 
@@ -258,8 +253,8 @@ public final  class MessageAssembler implements AutoCloseable {
          */
         public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
             checkArgument(duration > 0, "duration must be > 0");
-            this.expireStateAfterInactivityDuration = duration;
-            this.expireStateAfterInactivityUnit = unit;
+            expireStateAfterInactivityDuration = duration;
+            expireStateAfterInactivityUnit = unit;
             return this;
         }
 
@@ -270,7 +265,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder logContext(final String newLogContext) {
-            this.logContext = newLogContext;
+            logContext = newLogContext;
             return this;
         }
 
index 0cc36689bb9b470b728732d2eb1f0efef446338a..852e7f9b19212f8b6a5ac1da6e22db2ef6bb6607 100644 (file)
@@ -60,17 +60,9 @@ final class MessageSliceIdentifier implements Identifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (!(obj instanceof MessageSliceIdentifier)) {
-            return false;
-        }
-
-        MessageSliceIdentifier other = (MessageSliceIdentifier) obj;
-        return other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
-                && other.messageId == messageId;
+        return this == obj || obj instanceof MessageSliceIdentifier other
+            && other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
+            && other.messageId == messageId;
     }
 
     @Override
index 57a6f9ed4f44d48ffb2720126b32c21627222b3a..f30dbc66a71966f3691bf57a471a730aaa3b986b 100644 (file)
@@ -48,12 +48,12 @@ public class MessageSlicer implements AutoCloseable {
     private final long id;
 
     MessageSlicer(final Builder builder) {
-        this.fileBackedStreamFactory = builder.fileBackedStreamFactory;
-        this.messageSliceSize = builder.messageSliceSize;
-        this.maxSlicingTries = builder.maxSlicingTries;
+        fileBackedStreamFactory = builder.fileBackedStreamFactory;
+        messageSliceSize = builder.messageSliceSize;
+        maxSlicingTries = builder.maxSlicingTries;
 
         id = SLICER_ID_COUNTER.getAndIncrement();
-        this.logContext = builder.logContext + "_slicer-id-" + id;
+        logContext = builder.logContext + "_slicer-id-" + id;
 
         CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder =
                 CacheBuilder.newBuilder().removalListener(this::stateRemoved);
@@ -174,9 +174,9 @@ public class MessageSlicer implements AutoCloseable {
      * @return true if the message was handled, false otherwise
      */
     public boolean handleMessage(final Object message) {
-        if (message instanceof MessageSliceReply) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            return onMessageSliceReply((MessageSliceReply) message);
+        if (message instanceof MessageSliceReply sliceReply) {
+            LOG.debug("{}: handleMessage: {}", logContext, sliceReply);
+            return onMessageSliceReply(sliceReply);
         }
 
         return false;
@@ -219,8 +219,7 @@ public class MessageSlicer implements AutoCloseable {
 
     private boolean onMessageSliceReply(final MessageSliceReply reply) {
         final Identifier identifier = reply.getIdentifier();
-        if (!(identifier instanceof MessageSliceIdentifier)
-                || ((MessageSliceIdentifier)identifier).getSlicerId() != id) {
+        if (!(identifier instanceof MessageSliceIdentifier sliceIdentifier) || sliceIdentifier.getSlicerId() != id) {
             return false;
         }
 
@@ -236,7 +235,7 @@ public class MessageSlicer implements AutoCloseable {
                 final Optional<MessageSliceException> failure = reply.getFailure();
                 if (failure.isPresent()) {
                     LOG.warn("{}: Received failed {}", logContext, reply);
-                    processMessageSliceException(failure.get(), state, reply.getSendTo());
+                    processMessageSliceException(failure.orElseThrow(), state, reply.getSendTo());
                     return true;
                 }
 
@@ -336,7 +335,7 @@ public class MessageSlicer implements AutoCloseable {
          * @return this Builder
          */
         public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
-            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
             return this;
         }
 
@@ -348,7 +347,7 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder messageSliceSize(final int newMessageSliceSize) {
             checkArgument(newMessageSliceSize > 0, "messageSliceSize must be > 0");
-            this.messageSliceSize = newMessageSliceSize;
+            messageSliceSize = newMessageSliceSize;
             return this;
         }
 
@@ -361,7 +360,7 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder maxSlicingTries(final int newMaxSlicingTries) {
             checkArgument(newMaxSlicingTries > 0, "newMaxSlicingTries must be > 0");
-            this.maxSlicingTries = newMaxSlicingTries;
+            maxSlicingTries = newMaxSlicingTries;
             return this;
         }
 
@@ -376,8 +375,8 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
             checkArgument(duration > 0, "duration must be > 0");
-            this.expireStateAfterInactivityDuration = duration;
-            this.expireStateAfterInactivityUnit = unit;
+            expireStateAfterInactivityDuration = duration;
+            expireStateAfterInactivityUnit = unit;
             return this;
         }
 
@@ -388,7 +387,7 @@ public class MessageSlicer implements AutoCloseable {
          * @return this Builder
          */
         public Builder logContext(final String newLogContext) {
-            this.logContext = requireNonNull(newLogContext);
+            logContext = requireNonNull(newLogContext);
             return this;
         }
 
index caa1a8debf53911bd2193c085c36a64a5aea2117..373823ef0ffeaf0ac37140aab1d437c7e4753979 100644 (file)
@@ -18,11 +18,12 @@ import org.eclipse.jdt.annotation.Nullable;
  * @author Thomas Pantelis
  */
 public class LeaderStateChanged {
-    private final String memberId;
-    private final String leaderId;
+    private final @NonNull String memberId;
+    private final @Nullable String leaderId;
     private final short leaderPayloadVersion;
 
-    public LeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, short leaderPayloadVersion) {
+    public LeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final short leaderPayloadVersion) {
         this.memberId = requireNonNull(memberId);
         this.leaderId = leaderId;
         this.leaderPayloadVersion = leaderPayloadVersion;
index bb4ad65f161f11bb57e5d263cdc17b41a2e0bc57..ed0c10a7172b8edeb759819f8ddf1dc861b87ea1 100644 (file)
@@ -72,9 +72,8 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos
             }
 
 
-        } else if (message instanceof RoleChanged) {
+        } else if (message instanceof RoleChanged roleChanged) {
             // this message is sent by RaftActor. Notify registered listeners when this message is received.
-            RoleChanged roleChanged = (RoleChanged) message;
 
             LOG.info("RoleChangeNotifier for {} , received role change from {} to {}", memberId,
                 roleChanged.getOldRole(), roleChanged.getNewRole());
@@ -83,13 +82,13 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos
                 new RoleChangeNotification(roleChanged.getMemberId(),
                     roleChanged.getOldRole(), roleChanged.getNewRole());
 
-            for (ActorRef listener: registeredListeners.values()) {
+            for (ActorRef listener : registeredListeners.values()) {
                 listener.tell(latestRoleChangeNotification, getSelf());
             }
-        } else if (message instanceof LeaderStateChanged) {
-            latestLeaderStateChanged = (LeaderStateChanged)message;
+        } else if (message instanceof LeaderStateChanged leaderStateChanged) {
+            latestLeaderStateChanged = leaderStateChanged;
 
-            for (ActorRef listener: registeredListeners.values()) {
+            for (ActorRef listener : registeredListeners.values()) {
                 listener.tell(latestLeaderStateChanged, getSelf());
             }
         } else {
index b63b732f5bc99f5b97c552ee71af15c4ef700490..c7963056753c43a0062440e016dc80b465c79549 100644 (file)
@@ -59,7 +59,7 @@ import scala.concurrent.Future;
  *
  * @author Thomas Pantelis
  */
-public class LocalSnapshotStore extends SnapshotStore {
+public final class LocalSnapshotStore extends SnapshotStore {
     private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class);
     private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length();
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java
deleted file mode 100644 (file)
index fc65743..0000000
--- a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
-
-/**
- * An instance of a Payload class is meant to be used as the Payload for
- * AppendEntries.
- *
- * <p>
- * When an actor which is derived from RaftActor attempts to persistData it
- * must pass an instance of the Payload class. Similarly when state needs to
- * be applied to the derived RaftActor it will be passed an instance of the
- * Payload class.
- */
-public abstract class Payload {
-    public abstract int size();
-}
index 03b44b585129e97f199310b955272c57e8a055e8..c8ceb13ed97d429ef8dab7b1be26fe0a7b7ba2e7 100644 (file)
@@ -11,7 +11,7 @@ import com.google.common.annotations.Beta;
 import java.util.Set;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.schema.provider.impl.YangTextSchemaSourceSerializationProxy;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
 import scala.concurrent.Future;
 
 /**
index 4a4416d57655ce30d2cf0c3d2699642b383464ea..7a53188718e904115c99e2d4474543b90f802b94 100644 (file)
@@ -12,19 +12,18 @@ import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.ExecutionContext;
-import scala.concurrent.Future;
 
 /**
  * Provides schema sources from {@link RemoteYangTextSourceProvider}.
  */
 @Beta
-public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSchemaSource> {
+public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSource> {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteSchemaProvider.class);
 
     private final RemoteYangTextSourceProvider remoteRepo;
@@ -37,21 +36,18 @@ public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSchema
     }
 
     @Override
-    public ListenableFuture<YangTextSchemaSource> getSource(final SourceIdentifier sourceIdentifier) {
-        LOG.trace("Getting yang schema source for {}", sourceIdentifier.getName());
+    public ListenableFuture<YangTextSource> getSource(final SourceIdentifier sourceIdentifier) {
+        LOG.trace("Getting yang schema source for {}", sourceIdentifier.name().getLocalName());
 
-        Future<YangTextSchemaSourceSerializationProxy> result = remoteRepo.getYangTextSchemaSource(sourceIdentifier);
-
-        final SettableFuture<YangTextSchemaSource> res = SettableFuture.create();
-        result.onComplete(new OnComplete<YangTextSchemaSourceSerializationProxy>() {
+        final var res = SettableFuture.<YangTextSource>create();
+        remoteRepo.getYangTextSchemaSource(sourceIdentifier).onComplete(new OnComplete<>() {
             @Override
-            public void onComplete(final Throwable throwable,
-                    final YangTextSchemaSourceSerializationProxy yangTextSchemaSourceSerializationProxy) {
-                if (yangTextSchemaSourceSerializationProxy != null) {
-                    res.set(yangTextSchemaSourceSerializationProxy.getRepresentation());
+            public void onComplete(final Throwable failure, final YangTextSchemaSourceSerializationProxy success) {
+                if (success != null) {
+                    res.set(success.getRepresentation());
                 }
-                if (throwable != null) {
-                    res.setException(throwable);
+                if (failure != null) {
+                    res.setException(failure);
                 }
             }
         }, executionContext);
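
A hypothetical caller-side sketch (not in this patch) of the migrated API: getSource() now resolves to yangtools' YangTextSource, which is a CharSource, so the text can be read directly. The module name, revision, class name and the blocking get() are illustrative only; the sketch assumes it sits next to RemoteSchemaProvider.

    import org.opendaylight.yangtools.yang.common.Revision;
    import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified;
    import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;

    final class RemoteSourceFetch {
        // Blocks for brevity; production code would chain the ListenableFuture instead.
        static String fetchYangText(final RemoteSchemaProvider provider) throws Exception {
            final var sourceId = new SourceIdentifier(Unqualified.of("example-module"), Revision.of("2024-01-01"));
            return provider.getSource(sourceId).get().read();
        }
    }
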
index 5e88952dda26f2e2ad670c202f6f67b0e795e965..eea0aa86071d115e04f4bfbd23b83f8fa29a066d 100644 (file)
@@ -17,9 +17,9 @@ import com.google.common.util.concurrent.MoreExecutors;
 import java.io.IOException;
 import java.util.Set;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -51,16 +51,16 @@ public class RemoteYangTextSourceProviderImpl implements RemoteYangTextSourcePro
         LOG.trace("Sending yang schema source for {}", identifier);
 
         final Promise<YangTextSchemaSourceSerializationProxy> promise = akka.dispatch.Futures.promise();
-        ListenableFuture<YangTextSchemaSource> future =
-                repository.getSchemaSource(identifier, YangTextSchemaSource.class);
+        ListenableFuture<YangTextSource> future =
+                repository.getSchemaSource(identifier, YangTextSource.class);
 
-        Futures.addCallback(future, new FutureCallback<YangTextSchemaSource>() {
+        Futures.addCallback(future, new FutureCallback<YangTextSource>() {
             @Override
-            public void onSuccess(final YangTextSchemaSource result) {
+            public void onSuccess(final YangTextSource result) {
                 try {
                     promise.success(new YangTextSchemaSourceSerializationProxy(result));
                 } catch (IOException e) {
-                    LOG.warn("Unable to read schema source for {}", result.getIdentifier(), e);
+                    LOG.warn("Unable to read schema source for {}", result.sourceId(), e);
                     promise.failure(e);
                 }
             }
index 202de58a2780fd52efc401b0779a54e287f731b8..9ad9948e6c86c4ac0272cc04be5c8860dae11348 100644 (file)
@@ -5,36 +5,38 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
 import com.google.common.annotations.Beta;
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import java.io.IOException;
 import java.io.Serializable;
 import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 /**
- * {@link org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource} serialization proxy.
+ * {@link YangTextSource} serialization proxy.
  */
 @Beta
 public class YangTextSchemaSourceSerializationProxy implements Serializable {
     private static final long serialVersionUID = -6361268518176019477L;
 
-    private final byte[] schemaSource;
+    private final String schemaSource;
     private final Revision revision;
     private final String name;
 
-    public YangTextSchemaSourceSerializationProxy(final YangTextSchemaSource source) throws IOException {
-        this.revision = source.getIdentifier().getRevision().orElse(null);
-        this.name = source.getIdentifier().getName();
-        this.schemaSource = source.read();
+    public YangTextSchemaSourceSerializationProxy(final YangTextSource source) throws IOException {
+        final var sourceId = source.sourceId();
+        revision = sourceId.revision();
+        name = sourceId.name().getLocalName();
+        schemaSource = source.read();
     }
 
-    public YangTextSchemaSource getRepresentation() {
-        return YangTextSchemaSource.delegateForByteSource(
-                RevisionSourceIdentifier.create(name, revision), ByteSource.wrap(schemaSource));
+    public YangTextSource getRepresentation() {
+        return new DelegatedYangTextSource(new SourceIdentifier(Unqualified.of(name), revision),
+            CharSource.wrap(schemaSource));
     }
 }
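
The proxy now stores the schema text as a String read from the CharSource-based YangTextSource and rebuilds it through DelegatedYangTextSource. A sketch of the resulting round trip, assuming only the constructor and accessors shown above; the module name, revision and body are made up:

    import com.google.common.io.CharSource;
    import java.io.IOException;
    import org.opendaylight.yangtools.yang.common.Revision;
    import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified;
    import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
    import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
    import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;

    final class ProxyRoundTripSketch {
        static void roundTrip() throws IOException {
            // Made-up module name, revision and body; any YangTextSource would do here.
            final YangTextSource original = new DelegatedYangTextSource(
                new SourceIdentifier(Unqualified.of("example-mod"), Revision.of("2024-01-01")),
                CharSource.wrap("module example-mod { namespace urn:example; prefix ex; }"));

            final var proxy = new YangTextSchemaSourceSerializationProxy(original);
            final YangTextSource restored = proxy.getRepresentation();

            // Both the text and the identifier are expected to survive the round trip.
            if (!original.read().equals(restored.read())
                    || !original.sourceId().equals(restored.sourceId())) {
                throw new AssertionError("round trip mismatch");
            }
        }
    }
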
index 3bead64be8ec62dac7c4e763aad690468d8a998a..f1c3eae731da0543bd030f91856a3e64e2e6630a 100644 (file)
@@ -10,11 +10,11 @@ package org.opendaylight.controller.cluster.common.actor;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 
 import com.google.common.testing.FakeTicker;
 import java.util.List;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -56,9 +56,8 @@ public class MessageTrackerTest {
         ticker.advance(20, MILLISECONDS);
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
-        Assert.assertEquals(true, context2.error().isPresent());
-        Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size());
-
+        assertEquals(true, context2.error().isPresent());
+        assertEquals(0, context2.error().orElseThrow().getMessageProcessingTimesSinceLastExpectedMessage().size());
     }
 
     @Test
@@ -78,21 +77,21 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context2.error().isPresent());
+        assertEquals(true, context2.error().isPresent());
 
-        MessageTracker.Error error = context2.error().get();
+        MessageTracker.Error error = context2.error().orElseThrow();
 
         List<MessageTracker.MessageProcessingTime> messageProcessingTimes =
                 error.getMessageProcessingTimesSinceLastExpectedMessage();
 
-        Assert.assertEquals(3, messageProcessingTimes.size());
+        assertEquals(3, messageProcessingTimes.size());
 
-        Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
-        Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
-        Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
-        Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
-        Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
-        Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+        assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
+        assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
+        assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
+        assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
+        assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
+        assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
 
         LOG.error("An error occurred : {}" , error);
     }
@@ -107,8 +106,7 @@ public class MessageTrackerTest {
         ticker.advance(1, MILLISECONDS);
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
-        Assert.assertEquals(false, context2.error().isPresent());
-
+        assertEquals(false, context2.error().isPresent());
     }
 
     @Test
@@ -117,12 +115,7 @@ public class MessageTrackerTest {
 
         messageTracker.received(new Foo());
 
-        try {
-            messageTracker.received(new Foo());
-            fail("Expected an IllegalStateException");
-        } catch (IllegalStateException e) {
-            // expected
-        }
+        assertThrows(IllegalStateException.class, () -> messageTracker.received(new Foo()));
     }
 
     @Test
@@ -139,15 +132,15 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context.error().isPresent());
+        assertEquals(true, context.error().isPresent());
 
-        MessageTracker.Error error = context.error().get();
+        MessageTracker.Error error = context.error().orElseThrow();
 
-        Assert.assertEquals(null, error.getLastExpectedMessage());
-        Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+        assertEquals(null, error.getLastExpectedMessage());
+        assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
 
         String errorString = error.toString();
-        Assert.assertTrue(errorString.contains("Last Expected Message = null"));
+        assertTrue(errorString.contains("Last Expected Message = null"));
 
         LOG.error("An error occurred : {}", error);
     }
@@ -162,8 +155,7 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context.error().isPresent());
-
+        assertEquals(true, context.error().isPresent());
     }
 
     @Test
@@ -172,20 +164,18 @@ public class MessageTrackerTest {
         messageTracker.begin();
 
         try (MessageTracker.Context ctx = messageTracker.received(45)) {
-            Assert.assertEquals(false, ctx.error().isPresent());
+            assertEquals(false, ctx.error().isPresent());
         }
         try (MessageTracker.Context ctx = messageTracker.received(45L)) {
-            Assert.assertEquals(false, ctx.error().isPresent());
+            assertEquals(false, ctx.error().isPresent());
         }
 
         List<MessageTracker.MessageProcessingTime> processingTimeList =
                 messageTracker.getMessagesSinceLastExpectedMessage();
 
-        Assert.assertEquals(2, processingTimeList.size());
+        assertEquals(2, processingTimeList.size());
 
         assertEquals(Integer.class, processingTimeList.get(0).getMessageClass());
         assertEquals(Long.class, processingTimeList.get(1).getMessageClass());
-
     }
-
 }
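
The cleanup above statically imports the JUnit assertions, swaps Optional.get() for orElseThrow(), and replaces the try/fail idiom with assertThrows. A minimal, tracker-independent sketch of the assertThrows pattern; the test class and message below are invented for illustration:

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertThrows;

    import org.junit.Test;

    public class AssertThrowsSketchTest {
        @Test
        public void duplicateMessageFails() {
            // assertThrows returns the thrown exception, so it can still be inspected,
            // which the old try/catch/fail idiom made awkward.
            final IllegalStateException ex = assertThrows(IllegalStateException.class, () -> {
                throw new IllegalStateException("duplicate expected message");
            });
            assertEquals("duplicate expected message", ex.getMessage());
        }
    }
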
index 9af555d390f5ba36db9fa3e96f2ab001e879f6f2..630ebecb4eabc596067e45580db4dfbaf57966e3 100644 (file)
 package org.opendaylight.controller.cluster.datastore.node.utils.stream;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.ImmutableSet;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
-import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.Set;
+import java.nio.charset.StandardCharsets;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
 import javax.xml.transform.dom.DOMSource;
-import org.custommonkey.xmlunit.Diff;
-import org.custommonkey.xmlunit.XMLUnit;
 import org.junit.Test;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UserMapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.w3c.dom.Document;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.xmlunit.builder.DiffBuilder;
 
 public class SerializationUtilsTest {
-
-    private static final QName CONTAINER_Q_NAME = QName.create("ns-1", "2017-03-17", "container1");
+    private static final QName CONTAINER1 = QName.create("ns-1", "2017-03-17", "container1");
 
     @Test
-    public void testSerializeDeserializeNodes() throws IOException {
-        final NormalizedNode normalizedNode = createNormalizedNode();
-        final byte[] bytes = serializeNormalizedNode(normalizedNode);
-        assertEquals(10564, bytes.length);
-        assertEquals(normalizedNode, deserializeNormalizedNode(bytes));
+    public void testSerializeDeserializeNodes() throws Exception {
+        final var normalizedNode = createNormalizedNode();
+        final var bytes = serialize(normalizedNode);
+        assertEquals(10567, bytes.length);
+        assertEquals(normalizedNode, deserialize(bytes));
     }
 
     @Test
     public void testSerializeDeserializeAnyXmlNode() throws Exception {
-        final ByteArrayInputStream is =
-                new ByteArrayInputStream("<xml><data/></xml>".getBytes(Charset.defaultCharset()));
-        final Document parse = UntrustedXML.newDocumentBuilder().parse(is);
-        final DOMSourceAnyxmlNode anyXmlNode = Builders.anyXmlBuilder()
-                  .withNodeIdentifier(id("anyXmlNode"))
-                .withValue(new DOMSource(parse))
-                .build();
-        final byte[] bytes = serializeNormalizedNode(anyXmlNode);
+        final var parse = UntrustedXML.newDocumentBuilder().parse(
+            new ByteArrayInputStream("<xml><data/></xml>".getBytes(StandardCharsets.UTF_8)));
+        final var anyXmlNode = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(id("anyXmlNode"))
+            .withValue(new DOMSource(parse))
+            .build();
+        final byte[] bytes = serialize(anyXmlNode);
         assertEquals(113, bytes.length);
-        final NormalizedNode deserialized = deserializeNormalizedNode(bytes);
-        final DOMSource value = (DOMSource) deserialized.body();
-        final Diff diff = XMLUnit.compareXML((Document) anyXmlNode.body().getNode(),
-                value.getNode().getOwnerDocument());
-        assertTrue(diff.toString(), diff.similar());
+
+        final var diff = DiffBuilder.compare(anyXmlNode.body().getNode())
+            // FIXME: why all this magic?
+            .withTest(((DOMSource) deserialize(bytes).body()).getNode().getOwnerDocument())
+            .checkForSimilar()
+            .build();
+        assertFalse(diff.toString(), diff.hasDifferences());
     }
 
     @Test
     public void testSerializeDeserializePath() throws IOException {
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        final DataOutput out = new DataOutputStream(bos);
-        final YangInstanceIdentifier path = YangInstanceIdentifier.builder()
-                .node(id("container1"))
-                .node(autmentationId("list1", "list2"))
-                .node(listId("list1", "keyName1", "keyValue1"))
-                .node(leafSetId("leafSer1", "leafSetValue1"))
-                .build();
-        SerializationUtils.writePath(out, path);
+        final var path = YangInstanceIdentifier.builder()
+            .node(id("container1"))
+            .node(listId("list1", "keyName1", "keyValue1"))
+            .node(leafSetId("leafSer1", "leafSetValue1"))
+            .build();
+
+        final var bos = new ByteArrayOutputStream();
+        try (var out = new DataOutputStream(bos)) {
+            SerializationUtils.writePath(out, path);
+        }
 
-        final byte[] bytes = bos.toByteArray();
-        assertEquals(119, bytes.length);
+        final var bytes = bos.toByteArray();
+        assertEquals(105, bytes.length);
 
-        final YangInstanceIdentifier deserialized =
-                SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes)));
-        assertEquals(path, deserialized);
+        assertEquals(path, SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes))));
     }
 
     @Test
     public void testSerializeDeserializePathAndNode() throws IOException {
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        final DataOutput out = new DataOutputStream(bos);
-        final NormalizedNode node = createNormalizedNode();
-        final YangInstanceIdentifier path = YangInstanceIdentifier.create(id("container1"));
-        SerializationUtils.writeNodeAndPath(out, path, node);
-
-        final byte[] bytes = bos.toByteArray();
-        assertEquals(10566, bytes.length);
+        final var path = YangInstanceIdentifier.of(id("container1"));
+        final var node = createNormalizedNode();
 
-        final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
-        final AtomicBoolean applierCalled = new AtomicBoolean(false);
-        SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
-            assertEquals(path, deserializedPath);
-            assertEquals(node, deserializedNode);
-            applierCalled.set(true);
-        });
-        assertTrue(applierCalled.get());
-    }
-
-    @Test
-    public void testSerializeDeserializeAugmentNoref() throws IOException {
-        final YangInstanceIdentifier expected = YangInstanceIdentifier.create(
-            AugmentationIdentifier.create(ImmutableSet.of(
-                QName.create("foo", "leaf1"),
-                QName.create("bar", "leaf2"))));
-
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        final DataOutput out = new DataOutputStream(bos);
-        SerializationUtils.writePath(out, expected);
+        final var bos = new ByteArrayOutputStream();
+        try (var out = new DataOutputStream(bos)) {
+            SerializationUtils.writeNodeAndPath(out, path, node);
+        }
 
         final byte[] bytes = bos.toByteArray();
-        assertEquals(37, bytes.length);
-
-        final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
-        final YangInstanceIdentifier read = SerializationUtils.readPath(in);
-        assertEquals(expected, read);
+        assertEquals(10569, bytes.length);
+
+        final var applierCalled = new AtomicBoolean(false);
+        try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
+            SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
+                assertEquals(path, deserializedPath);
+                assertEquals(node, deserializedNode);
+                applierCalled.set(true);
+            });
+        }
+        assertTrue(applierCalled.get());
     }
 
-    private static NormalizedNode deserializeNormalizedNode(final byte[] bytes) throws IOException {
-        return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes))).get();
+    private static NormalizedNode deserialize(final byte[] bytes) throws Exception {
+        return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)))
+            .orElseThrow();
     }
 
-    private static byte[] serializeNormalizedNode(final NormalizedNode node) throws IOException {
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    private static byte[] serialize(final NormalizedNode node) throws Exception {
+        final var bos = new ByteArrayOutputStream();
         SerializationUtils.writeNormalizedNode(new DataOutputStream(bos), node);
         return bos.toByteArray();
     }
 
-    private static NormalizedNode createNormalizedNode() {
-        final LeafSetNode<Object> leafSetNode = Builders.leafSetBuilder()
-                .withNodeIdentifier(id("leafSetNode"))
-                .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
-                .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
-                .build();
-        final LeafSetNode<Object> orderedLeafSetNode = Builders.orderedLeafSetBuilder()
-                .withNodeIdentifier(id("orderedLeafSetNode"))
-                .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
-                .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
-                .build();
-        final LeafNode<Boolean> booleanLeaf = createLeaf("booleanLeaf", true);
-        final LeafNode<Byte> byteLeaf = createLeaf("byteLeaf", (byte) 0);
-        final LeafNode<Short> shortLeaf = createLeaf("shortLeaf", (short) 55);
-        final LeafNode<Integer> intLeaf = createLeaf("intLeaf", 11);
-        final LeafNode<Long> longLeaf = createLeaf("longLeaf", 151515L);
-        final LeafNode<String> stringLeaf = createLeaf("stringLeaf", "stringValue");
-        final LeafNode<String> longStringLeaf = createLeaf("longStringLeaf", getLongString());
-        final LeafNode<QName> qNameLeaf = createLeaf("stringLeaf", QName.create("base", "qName"));
-        final LeafNode<YangInstanceIdentifier> idLeaf = createLeaf("stringLeaf", YangInstanceIdentifier.empty());
-        final MapEntryNode entry1 = Builders.mapEntryBuilder()
-                .withNodeIdentifier(listId("mapNode", "key", "key1"))
+    private static ContainerNode createNormalizedNode() {
+        final var stringLeaf = createLeaf("stringLeaf", "stringValue");
+        final var entry1 = ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(listId("mapNode", "key", "key1"))
+            .withChild(stringLeaf)
+            .build();
+        final var entry2 = ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(listId("mapNode", "key", "key2"))
+            .withChild(stringLeaf)
+            .build();
+
+        return ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CONTAINER1))
+                .withChild(createLeaf("booleanLeaf", true))
+                .withChild(createLeaf("byteLeaf", (byte) 0))
+                .withChild(createLeaf("shortLeaf", (short) 55))
+                .withChild(createLeaf("intLeaf", 11))
+                .withChild(createLeaf("longLeaf", 151515L))
                 .withChild(stringLeaf)
-                .build();
-        final MapEntryNode entry2 = Builders.mapEntryBuilder()
-                .withNodeIdentifier(listId("mapNode", "key", "key2"))
-                .withChild(stringLeaf)
-                .build();
-        final MapNode mapNode = Builders.mapBuilder()
-                .withNodeIdentifier(id("mapNode"))
-                .withChild(entry1)
-                .withChild(entry2)
-                .build();
-        final UserMapNode orderedMapNode = Builders.orderedMapBuilder()
-                .withNodeIdentifier(id("orderedMapNode"))
-                .withChild(entry2)
-                .withChild(entry1)
-                .build();
-        final UnkeyedListEntryNode unkeyedListEntry1 = Builders.unkeyedListEntryBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
-                .withChild(stringLeaf)
-                .build();
-        final UnkeyedListEntryNode unkeyedListEntry2 = Builders.unkeyedListEntryBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
-                .withChild(stringLeaf)
-                .build();
-        final UnkeyedListNode unkeyedListNode = Builders.unkeyedListBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
-                .withChild(unkeyedListEntry1)
-                .withChild(unkeyedListEntry2)
-                .build();
-        final ImmutableSet<QName> childNames =
-                ImmutableSet.of(QName.create(CONTAINER_Q_NAME, "aug1"), QName.create(CONTAINER_Q_NAME, "aug1"));
-        final AugmentationNode augmentationNode = Builders.augmentationBuilder()
-                .withNodeIdentifier(new YangInstanceIdentifier.AugmentationIdentifier(childNames))
+                .withChild(createLeaf("longStringLeaf", "0123456789".repeat(1000)))
+                .withChild(createLeaf("stringLeaf", QName.create("base", "qName")))
+                .withChild(createLeaf("stringLeaf", YangInstanceIdentifier.of(QName.create("test", "test"))))
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(id("mapNode"))
+                    .withChild(entry1)
+                    .withChild(entry2)
+                    .build())
+                .withChild(ImmutableNodes.newUserMapBuilder()
+                    .withNodeIdentifier(id("orderedMapNode"))
+                    .withChild(entry2)
+                    .withChild(entry1)
+                    .build())
+                .withChild(ImmutableNodes.newUnkeyedListBuilder()
+                    .withNodeIdentifier(id("unkeyedList"))
+                    .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                        .withNodeIdentifier(id("unkeyedList"))
+                        .withChild(stringLeaf)
+                        .build())
+                    .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                        .withNodeIdentifier(id("unkeyedList"))
+                        .withChild(stringLeaf)
+                        .build())
+                    .build())
+                .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                    .withNodeIdentifier(id("leafSetNode"))
+                    .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
+                    .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
+                    .build())
+                .withChild(ImmutableNodes.newUserLeafSetBuilder()
+                    .withNodeIdentifier(id("orderedLeafSetNode"))
+                    .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
+                    .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
+                    .build())
                 .withChild(createLeaf("aug1", "aug1Value"))
                 .withChild(createLeaf("aug2", "aug2Value"))
-                .build();
-        final ChoiceNode choiceNode = Builders.choiceBuilder()
-                .withNodeIdentifier(id("choiceNode"))
-                .withChild(createLeaf("choiceLeaf", 12))
-                .build();
-        return Builders.containerBuilder()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CONTAINER_Q_NAME))
-                .withChild(booleanLeaf)
-                .withChild(byteLeaf)
-                .withChild(shortLeaf)
-                .withChild(intLeaf)
-                .withChild(longLeaf)
-                .withChild(stringLeaf)
-                .withChild(longStringLeaf)
-                .withChild(qNameLeaf)
-                .withChild(idLeaf)
-                .withChild(mapNode)
-                .withChild(orderedMapNode)
-                .withChild(unkeyedListNode)
-                .withChild(leafSetNode)
-                .withChild(orderedLeafSetNode)
-                .withChild(augmentationNode)
-                .withChild(choiceNode)
+                .withChild(ImmutableNodes.newChoiceBuilder()
+                    .withNodeIdentifier(id("choiceNode"))
+                    .withChild(createLeaf("choiceLeaf", 12))
+                    .build())
                 .build();
     }
 
@@ -234,39 +184,20 @@ public class SerializationUtilsTest {
     }
 
     private static LeafSetEntryNode<Object> createLeafSetEntry(final String leafSet, final String value) {
-        return Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(leafSetId(leafSet, value))
-                .withValue(value)
-                .build();
+        return ImmutableNodes.leafSetEntry(leafSetId(leafSet, value));
     }
 
-    private static YangInstanceIdentifier.NodeIdentifier id(final String name) {
-        return new YangInstanceIdentifier.NodeIdentifier(QName.create(CONTAINER_Q_NAME, name));
+    private static NodeIdentifier id(final String name) {
+        return new NodeIdentifier(QName.create(CONTAINER1, name));
     }
 
-    private static YangInstanceIdentifier.NodeIdentifierWithPredicates listId(final String listName,
-                                                                              final String keyName,
-                                                                              final Object keyValue) {
-        return YangInstanceIdentifier.NodeIdentifierWithPredicates.of(QName.create(CONTAINER_Q_NAME, listName),
-                QName.create(CONTAINER_Q_NAME, keyName), keyValue);
+    private static NodeIdentifierWithPredicates listId(final String listName, final String keyName,
+            final Object keyValue) {
+        return NodeIdentifierWithPredicates.of(QName.create(CONTAINER1, listName), QName.create(CONTAINER1, keyName),
+            keyValue);
     }
 
-    private static <T> YangInstanceIdentifier.NodeWithValue<T> leafSetId(final String node, final T value) {
-        return new YangInstanceIdentifier.NodeWithValue<>(QName.create(CONTAINER_Q_NAME, node), value);
-    }
-
-    private static YangInstanceIdentifier.AugmentationIdentifier autmentationId(final String... nodes) {
-        final Set<QName> qNames = Arrays.stream(nodes)
-                .map(node -> QName.create(CONTAINER_Q_NAME, node))
-                .collect(Collectors.toSet());
-        return new YangInstanceIdentifier.AugmentationIdentifier(qNames);
-    }
-
-    private static String getLongString() {
-        final StringBuilder builder = new StringBuilder(10000);
-        for (int i = 0; i < 1000; i++) {
-            builder.append("0123456789");
-        }
-        return builder.toString();
+    private static <T> NodeWithValue<T> leafSetId(final String node, final T value) {
+        return new NodeWithValue<>(QName.create(CONTAINER1, node), value);
     }
 }
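
The test rewrite above moves from XMLUnit 1 (org.custommonkey.xmlunit) to xmlunit-core's DiffBuilder and builds all nodes through org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes. A stand-alone sketch of the DiffBuilder similarity check, using made-up XML literals rather than the serialized anyxml payload:

    import org.xmlunit.builder.DiffBuilder;
    import org.xmlunit.diff.Diff;

    final class XmlSimilaritySketch {
        static void check() {
            // DiffBuilder accepts Strings, DOM nodes or Sources; these literals are made up.
            // checkForSimilar() tolerates lexical differences that do not change the document.
            final Diff diff = DiffBuilder.compare("<xml><data/></xml>")
                .withTest("<xml><data></data></xml>")
                .checkForSimilar()
                .build();
            if (diff.hasDifferences()) {
                throw new AssertionError(diff.toString());
            }
        }
    }
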
index 8001ebd05158528cbc958659dec1e9a0f9bdf9fd..6b150131b3e5b058b8fe65e1f281c523b29549b3 100644 (file)
@@ -11,11 +11,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.containerNode;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
 
-import com.google.common.collect.Sets;
 import java.io.IOException;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -26,19 +26,16 @@ import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeNavigator;
 import org.opendaylight.controller.cluster.datastore.util.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.DOMSourceAnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemLeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
@@ -78,7 +75,6 @@ public class NormalizedNodePrunerTest {
         NormalizedNode actual = pruner.getResult().orElseThrow();
 
         assertEquals(expected, actual);
-
     }
 
     @Test(expected = IllegalStateException.class)
@@ -96,10 +92,8 @@ public class NormalizedNodePrunerTest {
         assertEquals(expected, actual);
 
         NormalizedNodeWriter.forStreamWriter(pruner).write(expected);
-
     }
 
-
     @Test
     public void testNodesPrunedWhenAugmentationSchemaMissing() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerNoAugSchema(TestModel.TEST_PATH);
@@ -136,7 +130,6 @@ public class NormalizedNodePrunerTest {
 
         // Asserting true here instead of checking actual value because I don't want this assertion to be fragile
         assertTrue(countNodes(expected, "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test") > 0);
-
     }
 
     private static int countNodes(final NormalizedNode normalizedNode, final String namespaceFilter) {
@@ -145,12 +138,10 @@ public class NormalizedNodePrunerTest {
         }
         final AtomicInteger count = new AtomicInteger();
         new NormalizedNodeNavigator((level, parentPath, normalizedNode1) -> {
-            if (!(normalizedNode1.getIdentifier() instanceof AugmentationIdentifier)) {
-                if (normalizedNode1.getIdentifier().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
-                    count.incrementAndGet();
-                }
+            if (normalizedNode1.name().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
+                count.incrementAndGet();
             }
-        }).navigate(YangInstanceIdentifier.empty().toString(), normalizedNode);
+        }).navigate(YangInstanceIdentifier.of().toString(), normalizedNode);
 
         return count.get();
     }
@@ -158,33 +149,16 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testLeafNodeNotPrunedWhenHasNoParent() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.DESC_QNAME));
-        NormalizedNode input = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
+        NormalizedNode input = ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
         assertEquals("normalizedNode", input, pruner.getResult().orElseThrow());
     }
 
-    @Test
-    public void testLeafNodePrunedWhenHasAugmentationParentAndSchemaMissing() throws IOException {
-        AugmentationIdentifier augId = new AugmentationIdentifier(Sets.newHashSet(TestModel.AUG_CONT_QNAME));
-        AbstractNormalizedNodePruner pruner = prunerFullSchema(YangInstanceIdentifier.builder()
-                .node(TestModel.TEST_QNAME).node(TestModel.AUGMENTED_LIST_QNAME)
-                        .node(TestModel.AUGMENTED_LIST_QNAME).node(augId).build());
-        LeafNode<Object> child = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
-        NormalizedNode input = Builders.augmentationBuilder().withNodeIdentifier(augId).withChild(child).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", Builders.augmentationBuilder().withNodeIdentifier(augId).build(), actual);
-    }
-
     @Test
     public void testLeafNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        NormalizedNode input = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
+        LeafNode<String> input = ImmutableNodes.leafNode(TestModel.INVALID_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
         assertEquals(Optional.empty(), pruner.getResult());
@@ -193,8 +167,7 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testLeafSetEntryNodeNotPrunedWhenHasNoParent() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
-        NormalizedNode input = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
+        LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.SHOE_QNAME, "puma");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
         NormalizedNode actual = pruner.getResult().orElseThrow();
@@ -204,10 +177,10 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testLeafSetEntryNodeNotPrunedWhenHasParent() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
-        LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
-        NormalizedNode input = Builders.leafSetBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.SHOE_QNAME)).withChild(child).build();
+        SystemLeafSetNode<?> input = ImmutableNodes.<String>newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.SHOE_QNAME))
+            .withChildValue("puma")
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
         NormalizedNode actual = pruner.getResult().orElseThrow();
@@ -217,8 +190,7 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testLeafSetEntryNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        NormalizedNode input = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
+        LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.INVALID_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
         assertEquals(Optional.empty(), pruner.getResult());
@@ -227,11 +199,10 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testLeafSetEntryNodePrunedWhenHasParentAndSchemaMissing() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
-        NormalizedNode input = Builders.leafSetBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withChild(child).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.<String>newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.INVALID_QNAME))
+            .withChildValue("test")
+            .build());
 
         assertEquals(Optional.empty(), pruner.getResult());
     }
@@ -239,33 +210,37 @@ public class NormalizedNodePrunerTest {
     @Test
     public void testAnyXMLNodeNotPrunedWhenHasNoParent() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
-        NormalizedNode input = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
+        AnyxmlNode<DOMSource> input = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+            .withValue(mock(DOMSource.class))
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
     public void testAnyXMLNodeNotPrunedWhenHasParent() throws IOException {
-        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
-        DOMSourceAnyxmlNode child = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
-        NormalizedNode input = Builders.containerBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.TEST_QNAME)).withChild(child).build();
+        final var pruner = prunerFullSchema(TestModel.TEST_PATH);
+        final var input = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+                .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+                .withValue(mock(DOMSource.class))
+                .build())
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
     public void testAnyXmlNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
         AbstractNormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
-        NormalizedNode input = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+            .withValue(mock(DOMSource.class))
+            .build());
 
         assertEquals(Optional.empty(), pruner.getResult());
     }
@@ -278,11 +253,10 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.INNER_CONTAINER_QNAME).build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        NormalizedNode input = ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME);
+        ContainerNode input = containerNode(TestModel.INNER_CONTAINER_QNAME);
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -293,8 +267,7 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.INVALID_QNAME).build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        NormalizedNode input = ImmutableNodes.containerNode(TestModel.INVALID_QNAME);
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(containerNode(TestModel.INVALID_QNAME));
 
         assertEquals(Optional.empty(), pruner.getResult());
     }
@@ -306,18 +279,20 @@ public class NormalizedNodePrunerTest {
                 .build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode innerList = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INVALID_QNAME)).build()).build();
-        NormalizedNode input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(innerList).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode expected = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                    TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build()).build()).build();
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", expected, actual);
+        NormalizedNodeWriter.forStreamWriter(pruner)
+            .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+                .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+                    .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+                        .withChild(containerNode(TestModel.INVALID_QNAME))
+                        .build())
+                    .build())
+                .build());
+
+        assertEquals(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+            .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+                .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build())
+                .build())
+            .build(), pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -327,13 +302,14 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.INNER_LIST_QNAME).build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
+        SystemMapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+            .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+                .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                .build())
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -343,10 +319,11 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.INVALID_QNAME).build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode input = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
-                TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(mapNodeBuilder(TestModel.INVALID_QNAME)
+            .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+                .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                .build())
+            .build());
 
         assertEquals(Optional.empty(), pruner.getResult());
     }
@@ -358,32 +335,27 @@ public class NormalizedNodePrunerTest {
                 .build();
         AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode innerList = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
-                TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
-        NormalizedNode input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(innerList).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode expected = mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
-        NormalizedNode actual = pruner.getResult().orElseThrow();
-        assertEquals("normalizedNode", expected, actual);
+        NormalizedNodeWriter.forStreamWriter(pruner)
+            .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+                .withChild(mapNodeBuilder(TestModel.INVALID_QNAME)
+                    .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+                        .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                        .build())
+                    .build())
+                .build());
+
+        assertEquals(mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+            pruner.getResult().orElseThrow());
     }
 
-    private static NormalizedNode createTestContainer() {
-        byte[] bytes1 = {1, 2, 3};
-        LeafSetEntryNode<Object> entry1 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes1)).withValue(bytes1).build();
-
-        byte[] bytes2 = {};
-        LeafSetEntryNode<Object> entry2 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes2)).withValue(bytes2).build();
-
+    private static ContainerNode createTestContainer() {
         return TestModel.createBaseTestContainerBuilder()
-                .withChild(ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                        new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
-                        .withChild(entry1).withChild(entry2).build())
-                .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[]{1, 2, 3, 4}))
-                .build();
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
+                .withChildValue(new byte[] {1, 2, 3})
+                .withChildValue(new byte[0])
+                .build())
+            .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[] {1, 2, 3, 4}))
+            .build();
     }
 }
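
Beyond the assertion cleanups, the rewrite above replaces the old Builders and ImmutableLeafSet*Builder classes with the single org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes entry point, drops the augmentation-specific cases (AugmentationIdentifier and AugmentationNode no longer exist in yangtools), and reads node identifiers through name() instead of getIdentifier(). A small, self-contained sketch of the new builder style; the QNames are made up for illustration:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    final class ImmutableNodesSketch {
        // Made-up QNames purely for illustration.
        private static final QName CONT = QName.create("urn:example", "cont");
        private static final QName NAME = QName.create("urn:example", "name");
        private static final QName SHOES = QName.create("urn:example", "shoes");

        static ContainerNode buildCont() {
            return ImmutableNodes.newContainerBuilder()
                .withNodeIdentifier(new NodeIdentifier(CONT))
                // leafNode() is the shorthand that replaces Builders.leafBuilder()
                .withChild(ImmutableNodes.leafNode(NAME, "example"))
                .withChild(ImmutableNodes.<String>newSystemLeafSetBuilder()
                    .withNodeIdentifier(new NodeIdentifier(SHOES))
                    .withChildValue("nike")
                    .withChildValue("puma")
                    .build())
                .build();
        }
    }
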
index bba8739813a6863f8f9129bb2379c316277d8f25..b29113cc8ec057159ce4396d47a3057f921d8022 100644 (file)
@@ -22,11 +22,9 @@ import org.opendaylight.yangtools.yang.common.Uint8;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
@@ -55,9 +53,9 @@ public class UintAdaptingPrunerTest {
 
     @Test
     public void testListTranslation() throws IOException {
-        assertEquals(Builders.mapBuilder()
+        assertEquals(ImmutableNodes.newSystemMapBuilder()
             .withNodeIdentifier(new NodeIdentifier(LST))
-            .withChild(Builders.mapEntryBuilder()
+            .withChild(ImmutableNodes.newMapEntryBuilder()
                 .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.<QName, Object>builder()
                     .put(A, (byte) 1)
                     .put(B, (short) 1)
@@ -78,9 +76,9 @@ public class UintAdaptingPrunerTest {
                 .withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
                 .build())
             .build(),
-            prune(Builders.mapBuilder()
+            prune(ImmutableNodes.newSystemMapBuilder()
                 .withNodeIdentifier(new NodeIdentifier(LST))
-                .withChild(Builders.mapEntryBuilder()
+                .withChild(ImmutableNodes.newMapEntryBuilder()
                     .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST,  ImmutableMap.<QName, Object>builder()
                         .put(A, (byte) 1)
                         .put(B, (short) 1)
@@ -105,7 +103,7 @@ public class UintAdaptingPrunerTest {
 
     @Test
     public void testContainerTranslation() throws IOException {
-        assertEquals(Builders.containerBuilder()
+        assertEquals(ImmutableNodes.newContainerBuilder()
             .withNodeIdentifier(new NodeIdentifier(CONT))
             .withChild(ImmutableNodes.leafNode(A, (byte) 1))
             .withChild(ImmutableNodes.leafNode(B, (short) 1))
@@ -116,7 +114,7 @@ public class UintAdaptingPrunerTest {
             .withChild(ImmutableNodes.leafNode(G, Uint32.ONE))
             .withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
             .build(),
-            prune(Builders.containerBuilder()
+            prune(ImmutableNodes.newContainerBuilder()
                 .withNodeIdentifier(new NodeIdentifier(CONT))
                 .withChild(ImmutableNodes.leafNode(A, (byte) 1))
                 .withChild(ImmutableNodes.leafNode(B, (short) 1))
@@ -131,85 +129,60 @@ public class UintAdaptingPrunerTest {
 
     @Test
     public void testLeafList8() throws IOException {
-        assertEquals(Builders.leafSetBuilder()
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
             .withNodeIdentifier(new NodeIdentifier(LFLST8))
-            .withChild(Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(new NodeWithValue<>(LFLST8, Uint8.ONE))
-                .withValue(Uint8.ONE)
-                .build())
+            .withChildValue(Uint8.ONE)
             .build(),
-            prune(Builders.leafSetBuilder()
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(LFLST8))
-                .withChild(Builders.leafSetEntryBuilder()
-                    .withNodeIdentifier(new NodeWithValue<>(LFLST8, (short) 1))
-                    .withValue((short) 1)
-                    .build())
+                .withChildValue((short) 1)
                 .build()));
     }
 
     @Test
     public void testLeafList16() throws IOException {
-        assertEquals(Builders.leafSetBuilder()
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
             .withNodeIdentifier(new NodeIdentifier(LFLST16))
-            .withChild(Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(new NodeWithValue<>(LFLST16, Uint16.ONE))
-                .withValue(Uint16.ONE)
-                .build())
+            .withChildValue(Uint16.ONE)
             .build(),
-            prune(Builders.leafSetBuilder()
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(LFLST16))
-                .withChild(Builders.leafSetEntryBuilder()
-                    .withNodeIdentifier(new NodeWithValue<>(LFLST16,  1))
-                    .withValue(1)
-                    .build())
+                .withChildValue(1)
                 .build()));
     }
 
     @Test
     public void testLeafList32() throws IOException {
-        assertEquals(Builders.leafSetBuilder()
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
             .withNodeIdentifier(new NodeIdentifier(LFLST32))
-            .withChild(Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(new NodeWithValue<>(LFLST32, Uint32.ONE))
-                .withValue(Uint32.ONE)
-                .build())
+            .withChildValue(Uint32.ONE)
             .build(),
-            prune(Builders.leafSetBuilder()
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(LFLST32))
-                .withChild(Builders.leafSetEntryBuilder()
-                    .withNodeIdentifier(new NodeWithValue<>(LFLST32, 1L))
-                    .withValue(1L)
-                    .build())
+                .withChildValue(1L)
                 .build()));
     }
 
     @Test
     public void testLeafList64() throws IOException {
-        assertEquals(Builders.leafSetBuilder()
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
             .withNodeIdentifier(new NodeIdentifier(LFLST64))
-            .withChild(Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(new NodeWithValue<>(LFLST64, Uint64.ONE))
-                .withValue(Uint64.ONE)
-                .build())
+            .withChildValue(Uint64.ONE)
             .build(),
-            prune(Builders.leafSetBuilder()
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(LFLST64))
-                .withChild(Builders.leafSetEntryBuilder()
-                    .withNodeIdentifier(new NodeWithValue<>(LFLST64, BigInteger.ONE))
-                    .withValue(BigInteger.ONE)
-                    .build())
+                .withChildValue(BigInteger.ONE)
                 .build()));
     }
 
     private static NormalizedNode prune(final NormalizedNode node) throws IOException {
-        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT)
-                .withUintAdaption();
-        pruner.initializeForPath(YangInstanceIdentifier.create(node.getIdentifier()));
+        final var pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT).withUintAdaption();
+        pruner.initializeForPath(YangInstanceIdentifier.of(node.name()));
 
         try (NormalizedNodeWriter writer = NormalizedNodeWriter.forStreamWriter(pruner)) {
             writer.write(node);
         }
         pruner.close();
-        return pruner.getResult().get();
+        return pruner.getResult().orElseThrow();
     }
 }
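
The same migration applies here, plus two shorthands visible in the hunks: withChildValue() lets a leaf-set builder derive the NodeWithValue entry from the value itself, and the prune() helper now seeds the pruner with YangInstanceIdentifier.of(node.name()) rather than create(node.getIdentifier()). A minimal sketch of the withChildValue() form with a made-up QName:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.common.Uint8;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.SystemLeafSetNode;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    final class LeafSetShorthandSketch {
        // Made-up QName for illustration.
        private static final QName LFLST8 = QName.create("urn:example", "lflst8");

        static SystemLeafSetNode<Uint8> build() {
            // withChildValue() replaces the explicit leafSetEntryBuilder()/NodeWithValue pairing
            // removed in the hunks above.
            return ImmutableNodes.<Uint8>newSystemLeafSetBuilder()
                .withNodeIdentifier(new NodeIdentifier(LFLST8))
                .withChildValue(Uint8.ONE)
                .build();
        }
    }
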
index 7120c31de00ae88d311bfe699ac5d8efbcc9466c..37d102d0f352e98aaf5382290a2c751091d57528 100644 (file)
@@ -10,39 +10,22 @@ package org.opendaylight.controller.cluster.datastore.util;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import java.io.InputStream;
-import java.math.BigDecimal;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.Decimal64;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
 import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.NormalizedNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
@@ -185,123 +168,69 @@ public final class TestModel {
     }
 
     public static DataContainerNodeBuilder<NodeIdentifier, ContainerNode> createBaseTestContainerBuilder() {
-        // Create a list of shoes
-        // This is to test leaf list entry
-        final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(SHOE_QNAME, "nike")).withValue("nike").build();
-
-        final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(SHOE_QNAME, "puma")).withValue("puma").build();
-
-        final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                new NodeIdentifier(SHOE_QNAME)).withChild(nike).withChild(puma).build();
-
-        // Test a leaf-list where each entry contains an identity
-        final LeafSetEntryNode<Object> cap1 =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "capability"), DESC_QNAME))
-                        .withValue(DESC_QNAME).build();
-
-        final LeafSetNode<Object> capabilities =
-                ImmutableLeafSetNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(QName.create(
-                                        TEST_QNAME, "capability"))).withChild(cap1).build();
-
-        ContainerNode switchFeatures =
-                ImmutableContainerNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(SWITCH_FEATURES_QNAME))
-                        .withChild(capabilities).build();
-
-        // Create a leaf list with numbers
-        final LeafSetEntryNode<Object> five =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "number"), 5)).withValue(5).build();
-        final LeafSetEntryNode<Object> fifteen =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "number"), 15)).withValue(15).build();
-        final LeafSetNode<Object> numbers =
-                ImmutableLeafSetNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(QName.create(
-                                        TEST_QNAME, "number"))).withChild(five).withChild(fifteen)
-                        .build();
-
-
-        // Create augmentations
-        MapEntryNode augMapEntry = createAugmentedListEntry(1, "First Test");
-
-        // Create a bits leaf
-        NormalizedNodeBuilder<NodeIdentifier, Object, LeafNode<Object>>
-                myBits = Builders.leafBuilder()
-                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "my-bits")))
-                .withValue(ImmutableSet.of("foo", "bar"));
-
-        // Create unkeyed list entry
-        UnkeyedListEntryNode unkeyedListEntry = Builders.unkeyedListEntryBuilder()
-                .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
-                .withChild(ImmutableNodes.leafNode(NAME_QNAME, "unkeyed-entry-name"))
-                .build();
-
         // Create YangInstanceIdentifier with all path arg types.
-        YangInstanceIdentifier instanceID = YangInstanceIdentifier.create(
-                new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
-                NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"),
-                        QName.create(TEST_QNAME, "key"), 10),
-                new AugmentationIdentifier(ImmutableSet.of(
-                        QName.create(TEST_QNAME, "aug1"), QName.create(TEST_QNAME, "aug2"))),
-                new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
-
-        Map<QName, Object> keyValues = new HashMap<>();
-        keyValues.put(CHILDREN_QNAME, FIRST_CHILD_NAME);
-
+        YangInstanceIdentifier instanceID = YangInstanceIdentifier.of(
+            new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
+            NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"),
+                QName.create(TEST_QNAME, "key"), 10),
+            new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
 
         // Create the document
-        return ImmutableContainerNodeBuilder
-                .create()
-                .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
-                .withChild(myBits.build())
-                .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC))
-                .withChild(ImmutableNodes.leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
-                .withChild(ImmutableNodes.leafNode(SHORT_LEAF_QNAME, SHORT_ID))
-                .withChild(ImmutableNodes.leafNode(BYTE_LEAF_QNAME, BYTE_ID))
-                .withChild(ImmutableNodes.leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100)))
-                .withChild(ImmutableNodes.leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, BigDecimal.valueOf(1.2)))
-                .withChild(ImmutableNodes.leafNode(SOME_REF_QNAME, instanceID))
-                .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME))
-                .withChild(Builders.unkeyedListBuilder()
-                        .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
-                        .withChild(unkeyedListEntry).build())
-                .withChild(Builders.choiceBuilder()
-                        .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME))
-                        .withChild(ImmutableNodes.leafNode(TWO_QNAME, "two")).build())
-                .withChild(Builders.orderedMapBuilder()
-                        .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME))
-                        .withValue(ImmutableList.<MapEntryNode>builder().add(
-                                mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
-                                mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()).build())
-                        .build())
-                .withChild(shoes)
-                .withChild(numbers)
-                .withChild(switchFeatures)
-                .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(augMapEntry).build())
-                .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
-                                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
-                                .withChild(BAR_NODE).build()
-                );
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            // Create a bits leaf
+            .withChild(leafNode(QName.create(TEST_QNAME, "my-bits"), ImmutableSet.of("foo", "bar")))
+            .withChild(leafNode(DESC_QNAME, DESC))
+            .withChild(leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
+            .withChild(leafNode(SHORT_LEAF_QNAME, SHORT_ID))
+            .withChild(leafNode(BYTE_LEAF_QNAME, BYTE_ID))
+            .withChild(leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100)))
+            .withChild(leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, Decimal64.valueOf("1.2").scaleTo(2)))
+            .withChild(leafNode(SOME_REF_QNAME, instanceID))
+            .withChild(leafNode(MYIDENTITY_QNAME, DESC_QNAME))
+            .withChild(ImmutableNodes.newUnkeyedListBuilder()
+                .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+                // Create unkeyed list entry
+                .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+                    .withChild(leafNode(NAME_QNAME, "unkeyed-entry-name"))
+                    .build())
+                .build())
+            .withChild(ImmutableNodes.newChoiceBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME))
+                .withChild(leafNode(TWO_QNAME, "two")).build())
+            .withChild(ImmutableNodes.newUserMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME))
+                .withValue(List.of(
+                    mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
+                    mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()))
+                .build())
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(SHOE_QNAME))
+                .withChildValue("nike")
+                .withChildValue("puma")
+                .build())
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
+                .withChildValue(5)
+                .withChildValue(15)
+                .build())
+            .withChild(ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(SWITCH_FEATURES_QNAME))
+                // Test a leaf-list where each entry contains an identity
+                .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "capability")))
+                    .withChildValue(DESC_QNAME)
+                    .build())
+                .build())
+            .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME)
+                // Create augmentations
+                .withChild(createAugmentedListEntry(1, "First Test"))
+                .build())
+            .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+                .withChild(BAR_NODE)
+                .build());
     }
 
     public static ContainerNode createTestContainer() {
@@ -309,76 +238,41 @@ public final class TestModel {
     }
 
     public static MapEntryNode createAugmentedListEntry(final int id, final String name) {
-        Set<QName> childAugmentations = new HashSet<>();
-        childAugmentations.add(AUG_CONT_QNAME);
-
-        ContainerNode augCont = ImmutableContainerNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME))
-                        .withChild(ImmutableNodes.leafNode(AUG_NAME_QNAME, name))
-                        .build();
-
-
-        final AugmentationIdentifier augmentationIdentifier = new AugmentationIdentifier(childAugmentations);
-        final AugmentationNode augmentationNode =
-                Builders.augmentationBuilder()
-                        .withNodeIdentifier(augmentationIdentifier).withChild(augCont)
-                        .build();
-
-        return ImmutableMapEntryNodeBuilder.create()
-                .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id))
-                .withChild(ImmutableNodes.leafNode(ID_QNAME, id))
-                .withChild(augmentationNode).build();
+        return ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id))
+            .withChild(leafNode(ID_QNAME, id))
+            .withChild(ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME))
+                .withChild(leafNode(AUG_NAME_QNAME, name))
+                .build())
+            .build();
     }
 
     public static ContainerNode createFamily() {
-        final DataContainerNodeBuilder<NodeIdentifier, ContainerNode>
-            familyContainerBuilder = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                        new NodeIdentifier(FAMILY_QNAME));
-
-        final CollectionNodeBuilder<MapEntryNode, SystemMapNode> childrenBuilder = mapNodeBuilder()
-            .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME));
-
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    FIRST_GRAND_CHILD_ID);
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    SECOND_GRAND_CHILD_ID);
-
-        firstGrandChildBuilder
-                .withChild(
-                        ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME,
-                                FIRST_GRAND_CHILD_ID)).withChild(
-                ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME,
-                        FIRST_GRAND_CHILD_NAME));
-
-        secondGrandChildBuilder.withChild(
-                ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME));
-
-        firstChildBuilder
-                .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
-                    .withChild(firstGrandChildBuilder.build())
-                    .build());
-
-
-        secondChildBuilder
-                .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
-                    .withChild(firstGrandChildBuilder.build())
-                    .build());
-
-        childrenBuilder.withChild(firstChildBuilder.build());
-        childrenBuilder.withChild(secondChildBuilder.build());
-
-        return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+        final var firstGrandChildBuilder = mapEntryBuilder(
+                GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+            .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
+            .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
+
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+            .withChild(ImmutableNodes.newSystemMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME))
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+                        .withChild(firstGrandChildBuilder.build())
+                        .build())
+                    .build())
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+                        .withChild(firstGrandChildBuilder.build())
+                        .build())
+                    .build())
+                .build())
+            .build();
     }
 }
index 56c78f1c70207e2c7833c7ee9b09c9adf2a4342c..a93b6a8baafc54ed8305890ae330d68c6dc68675 100644 (file)
@@ -70,8 +70,8 @@ public class ChunkedOutputStreamTest {
         }
 
         int counter = 0;
-        for (byte[] chunk: assertFinishedStream(size, 2)) {
-            for (byte actual: chunk) {
+        for (byte[] chunk : assertFinishedStream(size, 2)) {
+            for (byte actual : chunk) {
                 assertEquals((byte) counter++, actual);
             }
         }
index ff5d61b41d6b8a65f77524df99a7daec3111fc8a..da5c5b6763b806540074be6b193a74ccbeb7ae98 100644 (file)
@@ -18,7 +18,6 @@ import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.Arrays;
 import java.util.concurrent.TimeUnit;
 import org.junit.After;
@@ -98,16 +97,14 @@ public class FileBackedOutputStreamTest {
             assertEquals("Temp file", tempFileName, findTempFileName(TEMP_DIR));
             assertEquals("Size", bytes.length, fbos.asByteSource().size());
 
-            InputStream inputStream = fbos.asByteSource().openStream();
+            try (var inputStream = fbos.asByteSource().openStream()) {
+                assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
 
-            assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
-
-            byte[] inBytes = new byte[bytes.length];
-            assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
-            assertArrayEquals("Read InputStream", bytes, inBytes);
-            assertEquals("End of stream", -1, inputStream.read());
-
-            inputStream.close();
+                byte[] inBytes = new byte[bytes.length];
+                assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
+                assertArrayEquals("Read InputStream", bytes, inBytes);
+                assertEquals("End of stream", -1, inputStream.read());
+            }
 
             fbos.cleanup();
 
@@ -182,27 +179,27 @@ public class FileBackedOutputStreamTest {
         fail("Temp file was not deleted");
     }
 
-    static String findTempFileName(String dirPath) {
+    static String findTempFileName(final String dirPath) {
         String[] files = new File(dirPath).list();
         assertNotNull(files);
         assertTrue("Found more than one temp file: " + Arrays.toString(files), files.length < 2);
         return files.length == 1 ? files[0] : null;
     }
 
-    static boolean deleteFile(String file) {
+    static boolean deleteFile(final String file) {
         return new File(file).delete();
     }
 
-    static void deleteTempFiles(String path) {
+    static void deleteTempFiles(final String path) {
         String[] files = new File(path).list();
         if (files != null) {
-            for (String file: files) {
+            for (String file : files) {
                 deleteFile(path + File.separator + file);
             }
         }
     }
 
-    static void createDir(String path) {
+    static void createDir(final String path) {
         File dir = new File(path);
         if (!dir.exists() && !dir.mkdirs()) {
             throw new RuntimeException("Failed to create temp dir " + path);
index 4441857f39c7fab27cd357f34ae5bcb06b993922..af0027bdfecbd0b36fd02dfa107ede79c3b8e53d 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -22,7 +22,7 @@ public class AbortSlicingTest {
     @Test
     public void testSerialization() {
         AbortSlicing expected = new AbortSlicing(new StringIdentifier("test"));
-        AbortSlicing cloned = (AbortSlicing) SerializationUtils.clone(expected);
+        AbortSlicing cloned = SerializationUtils.clone(expected);
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
     }
 }
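This test (and the similar ones further down) switches from commons-lang to commons-lang3, whose SerializationUtils.clone() is declared generically, so the explicit casts on the cloned objects can be dropped. A small self-contained illustration of the same call; the Widget record is a hypothetical stand-in for any Serializable class:

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    public class CloneDemo {
        record Widget(String name) implements Serializable { }

        public static void main(String[] args) {
            Widget original = new Widget("test");
            // lang3 declares <T extends Serializable> T clone(T), so no (Widget) cast is
            // needed, unlike org.apache.commons.lang.SerializationUtils in commons-lang 2.x
            Widget copy = SerializationUtils.clone(original);
            System.out.println(original.equals(copy)); // true: a deep copy via serialization
        }
    }
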
index 17b54a045dfb74d9437e41606c97d4492fa9f3e5..128a0442e3d4112b4a14df7b9c6da061d3285eec 100644 (file)
@@ -105,7 +105,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
 
             final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
             assertFailedMessageSliceReply(reply, IDENTIFIER, false);
-            assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+            assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
 
             assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
             verify(mockFiledBackedStream).cleanup();
@@ -130,7 +130,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
 
             final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
             assertFailedMessageSliceReply(reply, IDENTIFIER, false);
-            assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+            assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
 
             assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
             verify(mockFiledBackedStream).cleanup();
@@ -173,11 +173,11 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
         }
     }
 
-    private MessageAssembler newMessageAssembler(String logContext) {
+    private MessageAssembler newMessageAssembler(final String logContext) {
         return newMessageAssemblerBuilder(logContext).build();
     }
 
-    private Builder newMessageAssemblerBuilder(String logContext) {
+    private Builder newMessageAssemblerBuilder(final String logContext) {
         return MessageAssembler.builder().fileBackedStreamFactory(mockFiledBackedStreamFactory)
                 .assembledMessageCallback(mockAssembledMessageCallback).logContext(logContext);
     }
index 9c80033b92014bd08b43a4bbdfd4a321fc5dd780..c5dbcdd13cdf084493142859e85fdeefb334c42e 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -22,7 +22,7 @@ public class MessageSliceIdentifierTest {
     @Test
     public void testSerialization() {
         MessageSliceIdentifier expected = new MessageSliceIdentifier(new StringIdentifier("test"), 123L);
-        MessageSliceIdentifier cloned = (MessageSliceIdentifier) SerializationUtils.clone(expected);
+        MessageSliceIdentifier cloned = SerializationUtils.clone(expected);
         assertEquals("cloned", expected, cloned);
         assertEquals("getClientIdentifier", expected.getClientIdentifier(), cloned.getClientIdentifier());
         assertEquals("getSlicerId", expected.getSlicerId(), cloned.getSlicerId());
index 51c4479119f40618b0e15cc3bfd0744854569282..8b661f68d7e9f5275666a645edbd9f603753b42b 100644 (file)
@@ -16,7 +16,7 @@ import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -48,7 +48,7 @@ public class MessageSliceReplyTest {
     private void testSuccess() {
         MessageSliceReply expected = MessageSliceReply.success(new StringIdentifier("test"), 3,
                 TestProbe.apply(actorSystem).ref());
-        MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+        MessageSliceReply cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
@@ -59,15 +59,15 @@ public class MessageSliceReplyTest {
     private void testFailure() {
         MessageSliceReply expected = MessageSliceReply.failed(new StringIdentifier("test"),
                 new MessageSliceException("mock", true), TestProbe.apply(actorSystem).ref());
-        MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+        MessageSliceReply cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
         assertEquals("getSendTo", expected.getSendTo(), cloned.getSendTo());
         assertTrue("getFailure present", cloned.getFailure().isPresent());
-        assertEquals("getFailure message", expected.getFailure().get().getMessage(),
-                cloned.getFailure().get().getMessage());
-        assertEquals("getFailure isRetriable", expected.getFailure().get().isRetriable(),
-                cloned.getFailure().get().isRetriable());
+        assertEquals("getFailure message", expected.getFailure().orElseThrow().getMessage(),
+                cloned.getFailure().orElseThrow().getMessage());
+        assertEquals("getFailure isRetriable", expected.getFailure().orElseThrow().isRetriable(),
+                cloned.getFailure().orElseThrow().isRetriable());
     }
 }
index dc2e6de9d75a2fef8dff83b962751fd9008beb6b..afb764091ca43a63281e8b0db8c3d947905e3c51 100644 (file)
@@ -15,7 +15,7 @@ import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -35,7 +35,7 @@ public class MessageSliceTest {
 
     @After
     public void tearDown() {
-        TestKit.shutdownActorSystem(actorSystem, Boolean.TRUE);
+        TestKit.shutdownActorSystem(actorSystem, true);
     }
 
     @Test
@@ -50,7 +50,7 @@ public class MessageSliceTest {
 
         MessageSlice expected = new MessageSlice(new StringIdentifier("test"), data, 2, 3, 54321,
                 TestProbe.apply(actorSystem).ref());
-        MessageSlice cloned = (MessageSlice) SerializationUtils.clone(expected);
+        MessageSlice cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
index 61c06177991613d2013f2b1b4d0c96fc9e901664..63b37e12b0bc436708d4a9e84de0eb6998022400 100644 (file)
@@ -328,7 +328,7 @@ public class MessageSlicingIntegrationTest {
         assertEquals("Identifier", identifier, ((MessageSliceIdentifier)reply.getIdentifier())
                 .getClientIdentifier());
         assertEquals("Failure present", Boolean.TRUE, reply.getFailure().isPresent());
-        assertEquals("isRetriable", isRetriable, reply.getFailure().get().isRetriable());
+        assertEquals("isRetriable", isRetriable, reply.getFailure().orElseThrow().isRetriable());
     }
 
     static void assertMessageSlice(final MessageSlice sliceMessage, final Identifier identifier, final int sliceIndex,
index d45042567d9fa16d2b2a5b1ec2dc49038c405581..c9ab83e762977a5814d838e243c238b63ed87aa7 100644 (file)
@@ -35,7 +35,7 @@ import java.io.IOException;
 import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
index 2a8ddd2e560a39cbace5c16d49eda489c2ec14cc..611bebfd61e6330edc29efb31664406e56edb002 100644 (file)
@@ -7,34 +7,28 @@
  */
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.instanceOf;
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 import akka.dispatch.ExecutionContexts;
 import akka.dispatch.Futures;
 import com.google.common.io.CharSource;
-import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.util.concurrent.ExecutionException;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 public class RemoteSchemaProviderTest {
-    private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+    private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
 
     private RemoteSchemaProvider remoteSchemaProvider;
     private RemoteYangTextSourceProvider mockedRemoteSchemaRepository;
@@ -48,28 +42,24 @@ public class RemoteSchemaProviderTest {
 
     @Test
     public void getExistingYangTextSchemaSource() throws IOException, InterruptedException, ExecutionException {
-        YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(ID,
-            CharSource.wrap("Test").asByteSource(StandardCharsets.UTF_8));
+        final var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test"));
         doReturn(Futures.successful(new YangTextSchemaSourceSerializationProxy(schemaSource)))
             .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
 
-        YangTextSchemaSource providedSource = remoteSchemaProvider.getSource(ID).get();
-        assertEquals(ID, providedSource.getIdentifier());
-        assertArrayEquals(schemaSource.read(), providedSource.read());
+        final var providedSource = remoteSchemaProvider.getSource(ID).get();
+        assertEquals(ID, providedSource.sourceId());
+        assertEquals(schemaSource.read(), providedSource.read());
     }
 
     @Test
     public void getNonExistingSchemaSource() throws InterruptedException {
-        doReturn(Futures.failed(new SchemaSourceException("Source not provided")))
-            .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
+        final var exception = new SchemaSourceException(ID, "Source not provided");
+        doReturn(Futures.failed(exception)).when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
 
-        ListenableFuture<YangTextSchemaSource> sourceFuture = remoteSchemaProvider.getSource(ID);
+        final var sourceFuture = remoteSchemaProvider.getSource(ID);
         assertTrue(sourceFuture.isDone());
-        try {
-            sourceFuture.get();
-            fail("Expected a failure to occur");
-        } catch (ExecutionException e) {
-            assertThat(e.getCause(), instanceOf(SchemaSourceException.class));
-        }
+
+        final var cause = assertThrows(ExecutionException.class, sourceFuture::get).getCause();
+        assertSame(exception, cause);
     }
 }
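The rewritten test asserts the failure through assertThrows instead of the try/fail/catch idiom and checks the cause directly on the returned exception. A stand-alone sketch of that pattern against a Guava immediate failed future; the class name and the exception used here are invented for illustration:

    import static org.junit.Assert.assertSame;
    import static org.junit.Assert.assertThrows;

    import com.google.common.util.concurrent.Futures;
    import java.util.concurrent.ExecutionException;
    import org.junit.Test;

    public class FailedFutureAssertionExample {
        @Test
        public void causeIsExposedViaExecutionException() {
            final var failure = new IllegalStateException("boom");
            final var future = Futures.immediateFailedFuture(failure);

            // assertThrows returns the caught exception, so the cause can be checked inline
            final var cause = assertThrows(ExecutionException.class, future::get).getCause();
            assertSame(failure, cause);
        }
    }
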
index 1fda858fdd59a5c8c60ebfb0077c6145aa07074a..a63be0aa00e752d44d3704b8d5c30d8105cfcb10 100644 (file)
@@ -7,75 +7,76 @@
  */
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
 
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import com.google.common.util.concurrent.Futures;
 import java.util.Collections;
 import java.util.Set;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 import scala.concurrent.Await;
-import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class RemoteYangTextSourceProviderImplTest {
+    private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
 
-    private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+    @Mock
+    private SchemaRepository mockedLocalRepository;
 
     private RemoteYangTextSourceProviderImpl remoteRepository;
-    private SchemaRepository mockedLocalRepository;
     private final Set<SourceIdentifier> providedSources = Collections.singleton(ID);
 
     @Before
     public void setUp() {
-        mockedLocalRepository = Mockito.mock(SchemaRepository.class);
-
         remoteRepository = new RemoteYangTextSourceProviderImpl(mockedLocalRepository, providedSources);
     }
 
     @Test
     public void testGetExistingYangTextSchemaSource() throws Exception {
-        String source = "Test source.";
-        YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(
-                ID, ByteSource.wrap(source.getBytes()));
-        Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
-                Futures.immediateFuture(schemaSource));
+        var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test source."));
+
+        doReturn(Futures.immediateFuture(schemaSource)).when(mockedLocalRepository)
+            .getSchemaSource(ID, YangTextSource.class);
 
-        Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
-                remoteRepository.getYangTextSchemaSource(ID);
+        var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
         assertTrue(retrievedSourceFuture.isCompleted());
-        YangTextSchemaSource resultSchemaSource = Await.result(retrievedSourceFuture,
-                FiniteDuration.Zero()).getRepresentation();
-        assertEquals(resultSchemaSource.getIdentifier(), schemaSource.getIdentifier());
-        assertArrayEquals(resultSchemaSource.read(), schemaSource.read());
+        var resultSchemaSource = Await.result(retrievedSourceFuture, FiniteDuration.Zero()).getRepresentation();
+        assertEquals(resultSchemaSource.sourceId(), schemaSource.sourceId());
+        assertEquals(resultSchemaSource.read(), schemaSource.read());
     }
 
-    @Test(expected = SchemaSourceException.class)
+    @Test
     public void testGetNonExistentYangTextSchemaSource() throws Exception {
-        Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
-                Futures.immediateFailedFuture(new SchemaSourceException("Source is not provided")));
+        final var exception = new SchemaSourceException(ID, "Source is not provided");
+
+        doReturn(Futures.immediateFailedFuture(exception)).when(mockedLocalRepository)
+            .getSchemaSource(ID, YangTextSource.class);
 
-        Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
-                remoteRepository.getYangTextSchemaSource(ID);
+        var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
         assertTrue(retrievedSourceFuture.isCompleted());
-        Await.result(retrievedSourceFuture, FiniteDuration.Zero());
+
+        final var ex = assertThrows(SchemaSourceException.class,
+            () -> Await.result(retrievedSourceFuture, FiniteDuration.Zero()));
+        assertSame(exception, ex);
     }
 
     @Test
     public void testGetProvidedSources() throws Exception {
-        Set<SourceIdentifier> remoteProvidedSources = Await.result(remoteRepository
-                .getProvidedSources(), FiniteDuration.Zero());
+        var remoteProvidedSources = Await.result(remoteRepository.getProvidedSources(), FiniteDuration.Zero());
         assertEquals(providedSources, remoteProvidedSources);
     }
-
 }
index 084fd5242f2276818a92c7b82080cfb38fc4a120..ced954640cd142fbb4f01b63231a61eb37162801 100644 (file)
@@ -5,64 +5,55 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
-import java.nio.charset.StandardCharsets;
 import org.junit.Before;
 import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 public class YangTextSourceSerializationProxyTest {
-
-    private YangTextSchemaSource schemaSource;
+    private YangTextSource schemaSource;
 
     @Before
     public void setUp() {
-        String source = "Test source.";
-        schemaSource = YangTextSchemaSource.delegateForByteSource(
-                RevisionSourceIdentifier.create("test", Revision.of("2015-10-30")),
-                ByteSource.wrap(source.getBytes(StandardCharsets.UTF_8)));
+        schemaSource = new DelegatedYangTextSource(new SourceIdentifier("test", "2015-10-30"),
+            CharSource.wrap("Test source."));
     }
 
-
     @Test
     public void serializeAndDeserializeProxy() throws ClassNotFoundException, IOException {
-        YangTextSchemaSourceSerializationProxy proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
+        final var proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
         ObjectOutputStream oos = new ObjectOutputStream(bos);
 
         oos.writeObject(proxy);
 
         final byte[] bytes = bos.toByteArray();
-        assertEquals(353, bytes.length);
+        assertEquals(323, bytes.length);
 
         ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
 
-        YangTextSchemaSourceSerializationProxy deserializedProxy =
-                (YangTextSchemaSourceSerializationProxy) ois.readObject();
+        final var deserializedProxy = (YangTextSchemaSourceSerializationProxy) ois.readObject();
 
-        assertEquals(deserializedProxy.getRepresentation().getIdentifier(), proxy.getRepresentation().getIdentifier());
-        assertArrayEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
+        assertEquals(deserializedProxy.getRepresentation().sourceId(), proxy.getRepresentation().sourceId());
+        assertEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
     }
 
     @Test
     public void testProxyEqualsBackingYangTextSource() throws IOException {
-        YangTextSchemaSourceSerializationProxy serializationProxy =
-                new YangTextSchemaSourceSerializationProxy(schemaSource);
+        final var serializationProxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
 
-        assertEquals(serializationProxy.getRepresentation().getIdentifier(), schemaSource.getIdentifier());
-        assertArrayEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
+        assertEquals(serializationProxy.getRepresentation().sourceId(), schemaSource.sourceId());
+        assertEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
     }
 }
index af8b64bbb0e2eb5973f7dfa8e045d8e21004e7e4..4c6291230764d62531a6c194c435c11d59beac65 100644 (file)
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-clustering-config</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>jar</packaging>
   <description>Configuration files for md-sal clustering</description>
 
index 678903c74c4e37a4e0ea5ade80c305017a7f6512..b03a4a114e0f87a8cd06c913ebce9cb2ec4fb0fc 100644 (file)
@@ -89,11 +89,6 @@ operational.persistent=false
 # for a message slice. This needs to be below Akka's maximum-frame-size and defaults to 480KiB.
 maximum-message-slice-size=491520
 
-# Enable tell-based protocol between frontend (applications) and backend (shards). Using this protocol
-# should avoid AskTimeoutExceptions seen under heavy load. Defaults to false (use tell-based protocol).
-# Set to false to enable ask-based protocol.
-use-tell-based-protocol=true
-
 # Tune the maximum number of entries a follower is allowed to lag behind the leader before it is
 # considered out-of-sync. This flag may require tuning in face of a large number of small transactions.
 #sync-index-threshold=10
index 8f9b5041eeb5c4f58368e2de58545f44064db2d0..9834e08ea89b8776d0521fd3db15ebd210d0e6ad 100644 (file)
@@ -163,6 +163,9 @@ odl-cluster-data {
           max-entry-size = 16M
           # Maximum size of a segment
           max-segment-size = 128M
+          # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+          # Set to <= 0 to flush immediately.
+          #max-unflushed-bytes = 1M
           # Map each segment into memory. Defaults to true, use false to keep a heap-based
           # buffer instead.
           memory-mapped = true
@@ -181,6 +184,9 @@ odl-cluster-data {
           max-entry-size = 512K
           # Maximum size of a segment
           max-segment-size = 1M
+          # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+          # Set to <= 0 to flush immediately.
+          #max-unflushed-bytes = 128K
           # Map each segment into memory. Note that while this can improve performance,
           # it will also place additional burden on system resources.
           memory-mapped = false
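Both hunks above document the new max-unflushed-bytes knob next to the existing max-entry-size and max-segment-size settings: it caps how many bytes the segmented journal writes before forcing a sync to storage, defaulting to max-entry-size. A hedged override example in the same HOCON style; it belongs inside the segmented-journal block shown above, whose full enclosing path is not visible in this excerpt and may need adjusting:

    # inside the segmented journal block that already carries max-entry-size and max-segment-size
    max-entry-size = 16M
    max-segment-size = 128M
    # sync storage after at most 1 MiB of unflushed writes; a value <= 0 flushes every write
    max-unflushed-bytes = 1M
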
index ba89b84cd8a6a1ff6c38f5dec748bea69d1c8c37..a0bf479f16c90da69b96431ebb19a2ae2b6316f7 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
@@ -13,8 +13,8 @@
 
   <dependencies>
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
index 223132c95b066ca8307266168b872ca61280a9ca..53d7a2f22a95b65d003a4a2d7e836bc100162143 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.md.sal.common.util.jmx;
 
-import com.google.common.annotations.Beta;
 import java.lang.management.ManagementFactory;
 import javax.management.InstanceAlreadyExistsException;
 import javax.management.InstanceNotFoundException;
@@ -32,9 +31,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-@Beta
 public abstract class AbstractMXBean {
-
     private static final Logger LOG = LoggerFactory.getLogger(AbstractMXBean.class);
 
     public static final String BASE_JMX_PREFIX = "org.opendaylight.controller:";
@@ -89,7 +86,7 @@ public abstract class AbstractMXBean {
         boolean registered = false;
         try {
             // Object to identify MBean
-            final ObjectName mbeanName = this.getMBeanObjectName();
+            final ObjectName mbeanName = getMBeanObjectName();
 
             LOG.debug("Register MBean {}", mbeanName);
 
@@ -129,16 +126,13 @@ public abstract class AbstractMXBean {
      * @return true is successfully unregistered, false otherwise.
      */
     public boolean unregisterMBean() {
-        boolean unregister = false;
         try {
-            ObjectName mbeanName = this.getMBeanObjectName();
-            unregisterMBean(mbeanName);
-            unregister = true;
+            unregisterMBean(getMBeanObjectName());
+            return true;
         } catch (MBeanRegistrationException | InstanceNotFoundException | MalformedObjectNameException e) {
             LOG.debug("Failed when unregistering MBean", e);
+            return false;
         }
-
-        return unregister;
     }
 
     private void unregisterMBean(ObjectName mbeanName) throws MBeanRegistrationException,
index 96c97d517952d6658118caa9619e9df827cb58f3..2d397cdcf5b5534e5c98e36f6f30421163221392 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
+
     <!-- Java -->
     <dependency>
       <groupId>org.slf4j</groupId>
 
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>osgi.core</artifactId>
+      <artifactId>org.osgi.framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component</artifactId>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>osgi.cmpn</artifactId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.metatype.annotations</artifactId>
     </dependency>
 
     <!-- Akka -->
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-dom-codec-api</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-common-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-spi</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-dom-broker</artifactId>
     </dependency>
-
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>concepts</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>yang-binding</artifactId>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-impl</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-ri</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-binfmt</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-codec-xml</artifactId>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-codec-gson</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-util</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-lang3</artifactId>
       <artifactId>commons-text</artifactId>
     </dependency>
 
-    <dependency>
-      <groupId>io.atomix</groupId>
-      <artifactId>atomix-storage</artifactId>
-      <version>3.1.5</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>io.atomix</groupId>
-      <artifactId>atomix-utils</artifactId>
-      <version>3.1.5</version>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.awaitility</groupId>
       <artifactId>awaitility</artifactId>
       <artifactId>commons-io</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-test-util</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-dom-codec</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-test-utils</artifactId>
           <instructions>
             <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
 
-            <!-- Karaf cannot handle Factory Component requirements, see https://issues.apache.org/jira/browse/KARAF-6625 -->
-            <_dsannotations-options>norequirements</_dsannotations-options>
-
             <Export-Package>
                 org.opendaylight.controller.cluster.datastore;
                 org.opendaylight.controller.cluster.datastore.config;
index 05af18d32fd41d0972ab06ac02c82379e276c23d..60a72b07f98ea2ce7887aa892b0b7423f889e529 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.akka.osgi.impl;
 
 import akka.actor.ActorSystem;
-import com.typesafe.config.Config;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.ActorSystemProviderListener;
@@ -30,26 +29,12 @@ import scala.concurrent.duration.Duration;
 public final class OSGiActorSystemProvider implements ActorSystemProvider {
     private static final Logger LOG = LoggerFactory.getLogger(OSGiActorSystemProvider.class);
 
-    @Reference
-    AkkaConfigurationReader reader = null;
-
     private ActorSystemProviderImpl delegate;
 
-    @Override
-    public ActorSystem getActorSystem() {
-        return delegate.getActorSystem();
-    }
-
-    @Override
-    public ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
-            final ActorSystemProviderListener listener) {
-        return delegate.registerActorSystemProviderListener(listener);
-    }
-
     @Activate
-    void activate(final BundleContext bundleContext) {
+    public OSGiActorSystemProvider(@Reference final AkkaConfigurationReader reader, final BundleContext bundleContext) {
         LOG.info("Actor System provider starting");
-        final Config akkaConfig = AkkaConfigFactory.createAkkaConfig(reader);
+        final var akkaConfig = AkkaConfigFactory.createAkkaConfig(reader);
         delegate = new ActorSystemProviderImpl(BundleClassLoaderFactory.createClassLoader(bundleContext),
             QuarantinedMonitorActorPropsFactory.createProps(bundleContext, akkaConfig), akkaConfig);
         LOG.info("Actor System provider started");
@@ -62,5 +47,16 @@ public final class OSGiActorSystemProvider implements ActorSystemProvider {
         delegate = null;
         LOG.info("Actor System provider stopped");
     }
+
+    @Override
+    public ActorSystem getActorSystem() {
+        return delegate.getActorSystem();
+    }
+
+    @Override
+    public ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
+            final ActorSystemProviderListener listener) {
+        return delegate.registerActorSystemProviderListener(listener);
+    }
 }
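
The hunk above moves OSGiActorSystemProvider from field injection plus a separate activate() method to Declarative Services constructor injection, so the delegate is built only after all references are resolved. A minimal sketch of that DS 1.4 constructor-injection pattern, using hypothetical Greeter/ConfigReader types rather than the actual controller classes:

import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Deactivate;
import org.osgi.service.component.annotations.Reference;

// Hypothetical service interfaces, stand-ins for the real controller types.
interface ConfigReader {
    String configValue();
}

interface Greeter {
    String greet();
}

@Component(service = Greeter.class)
public final class GreeterComponent implements Greeter {
    private final String greeting;

    @Activate
    public GreeterComponent(@Reference final ConfigReader reader) {
        // All mandatory references are resolved before the constructor runs,
        // so no field is ever observed in a half-initialized state.
        greeting = "Hello, " + reader.configValue();
    }

    @Deactivate
    void deactivate() {
        // Release resources here; nothing to do in this sketch.
    }

    @Override
    public String greet() {
        return greeting;
    }
}

The same shape is visible above: the @Reference constructor parameter replaces the mutable @Reference field, and the accessor overrides simply delegate afterwards.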
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java
deleted file mode 100644 (file)
index 91fd64d..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap.Builder;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.PingPongMergingDOMDataBroker;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
-        implements PingPongMergingDOMDataBroker {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
-
-    private final AtomicLong txNum = new AtomicLong();
-    private final AtomicLong chainNum = new AtomicLong();
-    private final ClassToInstanceMap<DOMDataBrokerExtension> extensions;
-
-    private volatile AutoCloseable closeable;
-
-    protected AbstractDOMBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
-        super(datastores);
-
-        Builder<DOMDataBrokerExtension> extBuilder = ImmutableClassToInstanceMap.builder();
-        if (isSupported(datastores, DOMStoreTreeChangePublisher.class)) {
-            extBuilder.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
-                @Override
-                public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-                        final DOMDataTreeIdentifier treeId, final L listener) {
-                    DOMStore store = getDOMStore(treeId.getDatastoreType());
-                    return ((DOMStoreTreeChangePublisher) store).registerTreeChangeListener(
-                            treeId.getRootIdentifier(), listener);
-                }
-            });
-        }
-
-        if (isSupported(datastores, DOMDataTreeCommitCohortRegistry.class)) {
-            extBuilder.put(DOMDataTreeCommitCohortRegistry.class, new DOMDataTreeCommitCohortRegistry() {
-                @Override
-                public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
-                        final DOMDataTreeIdentifier path, final T cohort) {
-                    DOMStore store = getDOMStore(path.getDatastoreType());
-                    return ((DOMDataTreeCommitCohortRegistry) store).registerCommitCohort(path, cohort);
-                }
-            });
-        }
-
-        extensions = extBuilder.build();
-    }
-
-    private static boolean isSupported(final Map<LogicalDatastoreType, DOMStore> datastores,
-            final Class<?> expDOMStoreInterface) {
-        return datastores.values().stream().allMatch(expDOMStoreInterface::isInstance);
-    }
-
-    public void setCloseable(final AutoCloseable closeable) {
-        this.closeable = closeable;
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
-        super.close();
-
-        if (closeable != null) {
-            try {
-                closeable.close();
-            } catch (Exception e) {
-                LOG.debug("Error closing instance", e);
-            }
-        }
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
-        return extensions;
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains =
-                new EnumMap<>(LogicalDatastoreType.class);
-        for (Map.Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
-            backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
-        }
-
-        final long chainId = chainNum.getAndIncrement();
-        LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
-                backingChains);
-        return new DOMBrokerTransactionChain(chainId, backingChains, this, listener);
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private DOMStore getDOMStore(final LogicalDatastoreType type) {
-        DOMStore store = getTxFactories().get(type);
-        checkState(store != null, "Requested logical data store is not available.");
-        return store;
-    }
-}
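
The removed AbstractDOMBroker advertises optional broker extensions only when every backing store implements the corresponding interface (the isSupported/allMatch check above). A hedged, self-contained sketch of that pattern with Guava's ImmutableClassToInstanceMap and placeholder interfaces instead of the mdsal types:

import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ImmutableClassToInstanceMap;
import java.util.Collection;

// Placeholder types; the real code keys on DOMDataBrokerExtension subtypes.
interface StoreExtension { }
interface TreeChangeSupport extends StoreExtension { }
interface CommitCohortSupport extends StoreExtension { }

final class ExtensionAssembler {
    private ExtensionAssembler() {
        // utility class
    }

    static ClassToInstanceMap<StoreExtension> assemble(final Collection<Object> stores) {
        final ImmutableClassToInstanceMap.Builder<StoreExtension> builder = ImmutableClassToInstanceMap.builder();
        // Expose an extension only if every backing store can support it.
        if (stores.stream().allMatch(TreeChangeSupport.class::isInstance)) {
            builder.put(TreeChangeSupport.class, new TreeChangeSupport() { });
        }
        if (stores.stream().allMatch(CommitCohortSupport.class::isInstance)) {
            builder.put(CommitCohortSupport.class, new CommitCohortSupport() { });
        }
        return builder.build();
    }
}

Keying the map by extension class lets callers look up a capability type-safely via getInstance(Class), which is what the broker's getExtensions() consumers rely on.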
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java
deleted file mode 100644 (file)
index 2655b61..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMBrokerTransaction<T extends DOMStoreTransaction> implements DOMDataTreeTransaction {
-
-    private final EnumMap<LogicalDatastoreType, T> backingTxs;
-    private final Object identifier;
-    private final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories;
-
-    /**
-     * Creates new composite Transactions.
-     *
-     * @param identifier Identifier of transaction.
-     */
-    protected AbstractDOMBrokerTransaction(final Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
-        this.identifier = requireNonNull(identifier, "Identifier should not be null");
-        this.storeTxFactories = requireNonNull(storeTxFactories, "Store Transaction Factories should not be null");
-        this.backingTxs = new EnumMap<>(LogicalDatastoreType.class);
-    }
-
-    /**
-     * Returns subtransaction associated with supplied key.
-     *
-     * @param key the data store type key
-     * @return the subtransaction
-     * @throws NullPointerException
-     *             if key is null
-     * @throws IllegalArgumentException
-     *             if no subtransaction is associated with key.
-     */
-    protected final T getSubtransaction(final LogicalDatastoreType key) {
-        requireNonNull(key, "key must not be null.");
-
-        T ret = backingTxs.get(key);
-        if (ret == null) {
-            ret = createTransaction(key);
-            backingTxs.put(key, ret);
-        }
-        checkArgument(ret != null, "No subtransaction associated with %s", key);
-        return ret;
-    }
-
-    protected abstract T createTransaction(LogicalDatastoreType key);
-
-    /**
-     * Returns immutable Iterable of all subtransactions.
-     *
-     */
-    protected Collection<T> getSubtransactions() {
-        return backingTxs.values();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return identifier;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void closeSubtransactions() {
-        /*
-         * We share one exception for all failures, which are added
-         * as suppressed exceptions to it.
-         */
-        IllegalStateException failure = null;
-        for (T subtransaction : backingTxs.values()) {
-            try {
-                subtransaction.close();
-            } catch (Exception e) {
-                // If we have not allocated the failure yet, allocate it now
-                if (failure == null) {
-                    failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
-                } else {
-                    // Record any further exceptions as suppressed.
-                    failure.addSuppressed(e);
-                }
-            }
-        }
-        // If we have a failure, throw it after all close attempts.
-        if (failure != null) {
-            throw failure;
-        }
-    }
-
-    protected DOMStoreTransactionFactory getTxFactory(LogicalDatastoreType type) {
-        return storeTxFactories.get(type);
-    }
-
-    @Override
-    public final String toString() {
-        return addToStringAttributes(MoreObjects.toStringHelper(this).omitNullValues()).toString();
-    }
-
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("identifier", identifier);
-    }
-}
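
Two details of the removed AbstractDOMBrokerTransaction are worth noting: subtransactions are allocated lazily per datastore type in an EnumMap, and closeSubtransactions() folds every close failure into a single exception via addSuppressed. A standalone sketch of both ideas, with an assumed Store enum and AutoCloseable subtransactions rather than the DOMStore types:

import java.util.EnumMap;
import java.util.Map;
import java.util.function.Function;

final class LazySubtransactions<T extends AutoCloseable> {
    enum Store { CONFIGURATION, OPERATIONAL }

    private final EnumMap<Store, T> backing = new EnumMap<>(Store.class);
    private final Function<Store, T> factory;

    LazySubtransactions(final Function<Store, T> factory) {
        this.factory = factory;
    }

    // Allocate the per-store subtransaction on first use and cache it.
    T subtransaction(final Store store) {
        return backing.computeIfAbsent(store, factory);
    }

    // Close every allocated subtransaction; collect secondary failures as suppressed.
    void closeAll() {
        IllegalStateException failure = null;
        for (final Map.Entry<Store, T> entry : backing.entrySet()) {
            try {
                entry.getValue().close();
            } catch (Exception e) {
                if (failure == null) {
                    failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }
}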
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java
deleted file mode 100644 (file)
index 02e9e04..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBrokerWriteTransaction<T extends DOMStoreWriteTransaction>
-        extends AbstractDOMBrokerTransaction<T> implements DOMDataTreeWriteTransaction {
-
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, AbstractDOMTransactionFactory>
-            IMPL_UPDATER = AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class,
-                    AbstractDOMTransactionFactory.class, "commitImpl");
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, Future> FUTURE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class, Future.class,
-                    "commitFuture");
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBrokerWriteTransaction.class);
-    private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
-
-    /**
-     * Implementation of real commit. It also acts as an indication that
-     * the transaction is running -- which we flip atomically using
-     * {@link #IMPL_UPDATER}.
-     */
-    private volatile AbstractDOMTransactionFactory<?> commitImpl;
-
-    /**
-     * Future task of transaction commit. It starts off as null, but is
-     * set appropriately on {@link #submit()} and {@link #cancel()} via
-     * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
-     * <p/>
-     * Lazy set is safe for use because it is only referenced to in the
-     * {@link #cancel()} slow path, where we will busy-wait for it. The
-     * fast path gets the benefit of a store-store barrier instead of the
-     * usual store-load barrier.
-     */
-    private volatile Future<?> commitFuture;
-
-    protected AbstractDOMBrokerWriteTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
-            final AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories);
-        this.commitImpl = requireNonNull(commitImpl, "commitImpl must not be null.");
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) {
-        checkRunning(commitImpl);
-        checkInstanceIdentifierReferencesData(path,data);
-        getSubtransaction(store).write(path, data);
-    }
-
-    private static void checkInstanceIdentifierReferencesData(final YangInstanceIdentifier path,
-            final NormalizedNode data) {
-        checkArgument(data != null, "Attempted to store null data at %s", path);
-        final PathArgument lastArg = path.getLastPathArgument();
-        if (lastArg != null) {
-            checkArgument(lastArg.equals(data.getIdentifier()),
-                "Instance identifier references %s but data identifier is %s", lastArg, data);
-        }
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        checkRunning(commitImpl);
-        getSubtransaction(store).delete(path);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode data) {
-        checkRunning(commitImpl);
-        checkInstanceIdentifierReferencesData(path, data);
-        getSubtransaction(store).merge(path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        if (impl != null) {
-            LOG.trace("Transaction {} cancelled before submit", getIdentifier());
-            FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
-            closeSubtransactions();
-            return true;
-        }
-
-        // The transaction is in process of being submitted or cancelled. Busy-wait
-        // for the corresponding future.
-        Future<?> future;
-        do {
-            future = commitFuture;
-        }
-        while (future == null);
-
-        return future.cancel(false);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public FluentFuture<? extends CommitInfo> commit() {
-        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        checkRunning(impl);
-
-        final Collection<T> txns = getSubtransactions();
-        final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
-
-        FluentFuture<? extends CommitInfo> ret;
-        try {
-            for (final T txn : txns) {
-                cohorts.add(txn.ready());
-            }
-
-            ret = impl.commit(this, cohorts);
-        } catch (RuntimeException e) {
-            ret = FluentFuture.from(Futures.immediateFailedFuture(
-                    TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e)));
-        }
-        FUTURE_UPDATER.lazySet(this, ret);
-        return ret;
-    }
-
-    private void checkRunning(final AbstractDOMTransactionFactory<?> impl) {
-        checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
-    }
-
-    @Override
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return super.addToStringAttributes(toStringHelper).add("running", commitImpl == null);
-    }
-}
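
The removed write transaction resolves the submit/cancel race with AtomicReferenceFieldUpdater: whichever caller wins getAndSet(null) on commitImpl proceeds, and a losing cancel() busy-waits until the winner publishes the commit future via lazySet. A simplified sketch of that hand-off with toy types (not the broker classes):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

final class RacySubmitTask {
    private static final AtomicReferenceFieldUpdater<RacySubmitTask, Runnable> IMPL_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(RacySubmitTask.class, Runnable.class, "commitImpl");
    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<RacySubmitTask, Future> FUTURE_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(RacySubmitTask.class, Future.class, "commitFuture");
    private static final Future<?> CANCELLED = CompletableFuture.failedFuture(new IllegalStateException("cancelled"));

    // Non-null while the task may still be submitted; nulled exactly once.
    private volatile Runnable commitImpl = () -> { };
    // Published lazily once the winner has decided the outcome.
    private volatile Future<?> commitFuture;

    Future<?> submit() {
        final Runnable impl = IMPL_UPDATER.getAndSet(this, null);
        if (impl == null) {
            throw new IllegalStateException("Already submitted or cancelled");
        }
        impl.run();
        final Future<?> done = CompletableFuture.completedFuture(null);
        FUTURE_UPDATER.lazySet(this, done);
        return done;
    }

    boolean cancel() {
        if (IMPL_UPDATER.getAndSet(this, null) != null) {
            // We won the race: nothing was submitted, mark as cancelled.
            FUTURE_UPDATER.lazySet(this, CANCELLED);
            return true;
        }
        // Lost the race: busy-wait for the winner to publish the future.
        Future<?> future;
        do {
            future = commitFuture;
        } while (future == null);
        return future.cancel(false);
    }
}

As the original comments note, lazySet is enough here because the only reader of commitFuture is the slow cancel() path, which spins until the value appears.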
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java
deleted file mode 100644 (file)
index 91ca744..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
-    @SuppressWarnings("rawtypes")
-    private static final AtomicIntegerFieldUpdater<AbstractDOMTransactionFactory> UPDATER =
-            AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed");
-    private final Map<LogicalDatastoreType, T> storeTxFactories;
-    private volatile int closed = 0;
-
-    protected AbstractDOMTransactionFactory(final Map<LogicalDatastoreType, T> txFactories) {
-        this.storeTxFactories = new EnumMap<>(txFactories);
-    }
-
-    /**
-     * Implementations must return unique identifier for each and every call of
-     * this method.
-     *
-     * @return new Unique transaction identifier.
-     */
-    protected abstract Object newTransactionIdentifier();
-
-    /**
-     * Submits a transaction asynchronously for commit.
-     *
-     * @param transaction the transaction to submit
-     * @param cohorts the associated cohorts
-     * @return a resulting Future
-     */
-    protected abstract FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction transaction,
-            Collection<DOMStoreThreePhaseCommitCohort> cohorts);
-
-    /**
-     * Creates a new read-only transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeReadTransaction newReadOnlyTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories);
-    }
-
-
-    /**
-     * Creates a new write-only transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this);
-    }
-
-
-    /**
-     * Creates a new read-write transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerReadWriteTransaction(newTransactionIdentifier(), storeTxFactories, this);
-    }
-
-    /**
-     * Convenience accessor of backing factories intended to be used only by
-     * finalization of this class.
-     *
-     * <b>Note:</b>
-     * Finalization of this class may want to access other functionality of
-     * supplied Transaction factories.
-     *
-     * @return Map of backing transaction factories.
-     */
-    public final Map<LogicalDatastoreType, T> getTxFactories() {
-        return storeTxFactories;
-    }
-
-    /**
-     * Checks if instance is not closed.
-     *
-     * @throws IllegalStateException If instance of this class was closed.
-     *
-     */
-    protected final void checkNotClosed() {
-        Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
-    }
-
-    @Override
-    public void close() {
-        final boolean success = UPDATER.compareAndSet(this, 0, 1);
-        Preconditions.checkState(success, "Transaction factory was already closed");
-    }
-}
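
The removed factory guards against double close with a volatile int flag driven by AtomicIntegerFieldUpdater, which avoids allocating a separate AtomicInteger per instance. A minimal sketch of the idiom:

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

abstract class CloseOnce implements AutoCloseable {
    private static final AtomicIntegerFieldUpdater<CloseOnce> CLOSED_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(CloseOnce.class, "closed");

    private volatile int closed;

    // Throws if the instance has already been closed.
    protected final void checkNotClosed() {
        if (closed != 0) {
            throw new IllegalStateException("Factory was closed, no further operations allowed");
        }
    }

    @Override
    public void close() {
        if (!CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
            throw new IllegalStateException("Factory was already closed");
        }
    }
}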
index dce32b6a7417b38d9941577b9c27bdb8a9aa3220..d6da8487e0f4af80278028d48bfb660f08d6cf0c 100644 (file)
@@ -9,21 +9,16 @@ package org.opendaylight.controller.cluster.databroker;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
 
 import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.AbstractFuture;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
@@ -32,11 +27,18 @@ import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.AbstractDOMDataBroker;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +50,8 @@ import org.slf4j.LoggerFactory;
  * @author Thomas Pantelis
  */
 @Beta
-public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
+@Component(service = DOMDataBroker.class, property = "type=default")
+public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker {
     private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class);
     private static final String CAN_COMMIT = "CAN_COMMIT";
     private static final String PRE_COMMIT = "PRE_COMMIT";
@@ -69,142 +72,97 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
     public ConcurrentDOMDataBroker(final Map<LogicalDatastoreType, DOMStore> datastores,
             final Executor listenableFutureExecutor, final DurationStatisticsTracker commitStatsTracker) {
         super(datastores);
-        this.clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor);
+        clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor);
         this.commitStatsTracker = requireNonNull(commitStatsTracker);
     }
 
-    public DurationStatisticsTracker getCommitStatsTracker() {
-        return commitStatsTracker;
+    @Activate
+    public ConcurrentDOMDataBroker(@Reference final DataBrokerCommitExecutor commitExecutor,
+            @Reference(target = "(type=distributed-config)") final DOMStore configDatastore,
+            @Reference(target = "(type=distributed-operational)") final DOMStore operDatastore) {
+        this(Map.of(
+            LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore),
+            commitExecutor.executor(), commitExecutor.commitStatsTracker());
+        LOG.info("DOM Data Broker started");
+    }
+
+    @Override
+    @Deactivate
+    public void close() {
+        LOG.info("DOM Data Broker stopping");
+        super.close();
+        LOG.info("DOM Data Broker stopped");
     }
 
     @Override
     protected FluentFuture<? extends CommitInfo> commit(final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
+            final DOMStoreThreePhaseCommitCohort cohort) {
 
         checkArgument(transaction != null, "Transaction must not be null.");
-        checkArgument(cohorts != null, "Cohorts must not be null.");
+        checkArgument(cohort != null, "Cohort must not be null.");
         LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
 
-        if (cohorts.isEmpty()) {
-            return CommitInfo.emptyFluentFuture();
-        }
-
-        final AsyncNotifyingSettableFuture clientSubmitFuture =
-                new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
-
-        doCanCommit(clientSubmitFuture, transaction, cohorts);
-
-        return FluentFuture.from(clientSubmitFuture).transform(ignored -> CommitInfo.empty(),
-                MoreExecutors.directExecutor());
+        final var clientSubmitFuture = new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
+        doCanCommit(clientSubmitFuture, transaction, cohort);
+        return FluentFuture.from(clientSubmitFuture);
     }
 
     private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
             final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
+            final DOMStoreThreePhaseCommitCohort cohort) {
         final long startTime = System.nanoTime();
 
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
+        Futures.addCallback(cohort.canCommit(), new FutureCallback<>() {
             @Override
             public void onSuccess(final Boolean result) {
                 if (result == null || !result) {
-                    handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER,
-                            new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
-                } else if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we can move on to the preCommit phase
-                    doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
+                    onFailure(new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
                 } else {
-                    Futures.addCallback(cohortIterator.next().canCommit(), this, MoreExecutors.directExecutor());
+                    doPreCommit(startTime, clientSubmitFuture, transaction, cohort);
                 }
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
+                handleException(clientSubmitFuture, transaction, cohort, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
             }
-        };
-
-        Futures.addCallback(cohortIterator.next().canCommit(), futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+        Futures.addCallback(cohort.preCommit(), new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void notUsed) {
-                if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we can move on to the commit phase
-                    doCommit(startTime, clientSubmitFuture, transaction, cohorts);
-                } else {
-                    ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
-                    Futures.addCallback(preCommitFuture, this, MoreExecutors.directExecutor());
-                }
+            public void onSuccess(final Empty result) {
+                doCommit(startTime, clientSubmitFuture, transaction, cohort);
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
+                handleException(clientSubmitFuture, transaction, cohort, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
             }
-        };
-
-        ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
-        Futures.addCallback(preCommitFuture, futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+        Futures.addCallback(cohort.commit(), new FutureCallback<CommitInfo>() {
             @Override
-            public void onSuccess(final Void notUsed) {
-                if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we're done.
-                    commitStatsTracker.addDuration(System.nanoTime() - startTime);
-
-                    clientSubmitFuture.set();
-                } else {
-                    ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
-                    Futures.addCallback(commitFuture, this, MoreExecutors.directExecutor());
-                }
+            public void onSuccess(final CommitInfo result) {
+                commitStatsTracker.addDuration(System.nanoTime() - startTime);
+                clientSubmitFuture.set();
             }
 
             @Override
             public void onFailure(final Throwable throwable) {
-                handleException(clientSubmitFuture, transaction, cohorts, COMMIT, COMMIT_ERROR_MAPPER, throwable);
+                handleException(clientSubmitFuture, transaction, cohort, COMMIT, COMMIT_ERROR_MAPPER, throwable);
             }
-        };
-
-        ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
-        Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
-    @SuppressFBWarnings(value = { "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE", "UPM_UNCALLED_PRIVATE_METHOD" },
-            justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
-                + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
-                + "TransactionCommitFailedException and thus should be deemed as confirmed."
-                + "Also https://github.com/spotbugs/spotbugs/issues/811")
     private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
-            final String phase, final TransactionCommitFailedExceptionMapper exMapper,
-            final Throwable throwable) {
-
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort,
+            final String phase, final TransactionCommitFailedExceptionMapper exMapper, final Throwable throwable) {
         if (clientSubmitFuture.isDone()) {
             // We must have had failures from multiple cohorts.
             return;
@@ -213,29 +171,21 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
         // Use debug instead of warn level here because this exception gets propagated back to the caller via the Future
         LOG.debug("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, throwable);
 
-        // Transaction failed - tell all cohorts to abort.
-        @SuppressWarnings("unchecked")
-        ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohorts.size()];
-        int index = 0;
-        for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            canCommitFutures[index++] = cohort.abort();
-        }
-
         // Propagate the original exception
         final Exception e;
         if (throwable instanceof NoShardLeaderException || throwable instanceof ShardLeaderNotRespondingException) {
             e = new DataStoreUnavailableException(throwable.getMessage(), throwable);
-        } else if (throwable instanceof Exception) {
-            e = (Exception)throwable;
+        } else if (throwable instanceof Exception ex) {
+            e = ex;
         } else {
             e = new RuntimeException("Unexpected error occurred", throwable);
         }
         clientSubmitFuture.setException(exMapper.apply(e));
 
-        ListenableFuture<List<Void>> combinedFuture = Futures.allAsList(canCommitFutures);
-        Futures.addCallback(combinedFuture, new FutureCallback<List<Void>>() {
+        // abort
+        Futures.addCallback(cohort.abort(), new FutureCallback<Empty>() {
             @Override
-            public void onSuccess(final List<Void> notUsed) {
+            public void onSuccess(final Empty result) {
                 // Propagate the original exception to the client.
                 LOG.debug("Tx: {} aborted successfully", transaction.getIdentifier());
             }
@@ -257,8 +207,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
      * FIXME: This class should probably be moved to yangtools common utils for re-usability and
      * unified with AsyncNotifyingListenableFutureTask.
      */
-    private static class AsyncNotifyingSettableFuture extends AbstractFuture<Void> {
-
+    private static class AsyncNotifyingSettableFuture extends AbstractFuture<CommitInfo> {
         /**
          * ThreadLocal used to detect if the task completion thread is running the future listener Runnables.
          */
@@ -287,7 +236,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
         boolean set() {
             ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE);
             try {
-                return super.set(null);
+                return super.set(CommitInfo.empty());
             } finally {
                 ON_TASK_COMPLETION_THREAD_TL.set(null);
             }
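
After this change the broker drives a single DOMStoreThreePhaseCommitCohort through canCommit, preCommit and commit, each stage chained with Futures.addCallback on a direct executor, and any failure funnels into handleException, which also aborts the cohort. A hedged sketch of that chaining with a toy cohort interface (the real signatures differ, e.g. preCommit yields Empty and commit yields CommitInfo):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

// Toy three-phase cohort; the real type is DOMStoreThreePhaseCommitCohort.
interface ThreePhaseCohort {
    ListenableFuture<Boolean> canCommit();
    ListenableFuture<Void> preCommit();
    ListenableFuture<Void> commit();
    ListenableFuture<Void> abort();
}

final class ThreePhaseDriver {
    private ThreePhaseDriver() {
    }

    // Drives canCommit -> preCommit -> commit, aborting on any failure.
    static ListenableFuture<Void> drive(final ThreePhaseCohort cohort) {
        final SettableFuture<Void> result = SettableFuture.create();
        Futures.addCallback(cohort.canCommit(), new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(final Boolean ok) {
                if (ok == null || !ok) {
                    onFailure(new IllegalStateException("canCommit vetoed, no detailed cause available"));
                    return;
                }
                Futures.addCallback(cohort.preCommit(), new FutureCallback<Void>() {
                    @Override
                    public void onSuccess(final Void ignored) {
                        Futures.addCallback(cohort.commit(), new FutureCallback<Void>() {
                            @Override
                            public void onSuccess(final Void done) {
                                result.set(null);
                            }

                            @Override
                            public void onFailure(final Throwable cause) {
                                fail(result, cohort, cause);
                            }
                        }, MoreExecutors.directExecutor());
                    }

                    @Override
                    public void onFailure(final Throwable cause) {
                        fail(result, cohort, cause);
                    }
                }, MoreExecutors.directExecutor());
            }

            @Override
            public void onFailure(final Throwable cause) {
                fail(result, cohort, cause);
            }
        }, MoreExecutors.directExecutor());
        return result;
    }

    private static void fail(final SettableFuture<Void> result, final ThreePhaseCohort cohort, final Throwable cause) {
        // Propagate the original failure to the caller, then ask the cohort to abort.
        result.setException(cause);
        Futures.addCallback(cohort.abort(), new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void ignored) {
                // aborted cleanly
            }

            @Override
            public void onFailure(final Throwable abortFailure) {
                // nothing more to do; the original failure is already reported
            }
        }, MoreExecutors.directExecutor());
    }
}

directExecutor keeps each phase on the thread that completed the previous one; the production code additionally completes the user-visible future on a dedicated executor via AsyncNotifyingSettableFuture, as shown above.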
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java
deleted file mode 100644 (file)
index c4d5e1d..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadOnlyTransaction
-        extends AbstractDOMBrokerTransaction<DOMStoreReadTransaction> implements DOMDataTreeReadTransaction {
-
-    /**
-     * Creates new composite Transactions.
-     *
-     * @param identifier Identifier of transaction.
-     */
-    protected DOMBrokerReadOnlyTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
-        super(identifier, storeTxFactories);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-
-    @Override
-    public void close() {
-        closeSubtransactions();
-    }
-
-    @Override
-    protected DOMStoreReadTransaction createTransaction(final LogicalDatastoreType key) {
-        return getTxFactory(key).newReadOnlyTransaction();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java
deleted file mode 100644 (file)
index d9031c2..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadWriteTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreReadWriteTransaction>
-        implements DOMDataTreeReadWriteTransaction {
-
-    /**
-     * Constructs an instance.
-     *
-     * @param identifier identifier of transaction.
-     * @param storeTxFactories the backing transaction store factories
-     */
-    protected DOMBrokerReadWriteTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory>  storeTxFactories,
-            final AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories, commitImpl);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-
-    @Override
-    protected DOMStoreReadWriteTransaction createTransaction(final LogicalDatastoreType key) {
-        return getTxFactory(key).newReadWriteTransaction();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java
deleted file mode 100644 (file)
index 3364d23..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory<DOMStoreTransactionChain>
-        implements DOMTransactionChain {
-    private enum State {
-        RUNNING,
-        CLOSING,
-        CLOSED,
-        FAILED,
-    }
-
-    private static final AtomicIntegerFieldUpdater<DOMBrokerTransactionChain> COUNTER_UPDATER =
-            AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter");
-    private static final AtomicReferenceFieldUpdater<DOMBrokerTransactionChain, State> STATE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state");
-    private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class);
-    private final AtomicLong txNum = new AtomicLong();
-    private final AbstractDOMBroker broker;
-    private final DOMTransactionChainListener listener;
-    private final long chainId;
-
-    private volatile State state = State.RUNNING;
-    private volatile int counter = 0;
-
-    /**
-     * Constructs an instance.
-     *
-     * @param chainId
-     *            ID of transaction chain
-     * @param chains
-     *            Backing {@link DOMStoreTransactionChain}s.
-     * @param listener
-     *            Listener, which listens on transaction chain events.
-     * @throws NullPointerException
-     *             If any of arguments is null.
-     */
-    DOMBrokerTransactionChain(final long chainId, final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
-            final AbstractDOMBroker broker, final DOMTransactionChainListener listener) {
-        super(chains);
-        this.chainId = chainId;
-        this.broker = requireNonNull(broker);
-        this.listener = requireNonNull(listener);
-    }
-
-    private void checkNotFailed() {
-        checkState(state != State.FAILED, "Transaction chain has failed");
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit(
-            final DOMDataTreeWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-        checkNotFailed();
-        checkNotClosed();
-
-        final FluentFuture<? extends CommitInfo> ret = broker.commit(transaction, cohorts);
-
-        COUNTER_UPDATER.incrementAndGet(this);
-        ret.addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                transactionCompleted();
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                transactionFailed(transaction, failure);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-
-    @Override
-    public void close() {
-        final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
-        if (!success) {
-            LOG.debug("Chain {} is no longer running", this);
-            return;
-        }
-
-        super.close();
-        for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
-            subChain.close();
-        }
-
-        if (counter == 0) {
-            finishClose();
-        }
-    }
-
-    private void finishClose() {
-        state = State.CLOSED;
-        listener.onTransactionChainSuccessful(this);
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private void transactionCompleted() {
-        if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
-            finishClose();
-        }
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private void transactionFailed(final DOMDataTreeWriteTransaction tx, final Throwable cause) {
-        state = State.FAILED;
-        LOG.debug("Transaction chain {} failed.", this, cause);
-        listener.onTransactionChainFailed(this, tx, cause);
-    }
-}
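
The removed chain defers its final close: close() flips RUNNING to CLOSING, and the last completing transaction (counter dropping to zero) performs finishClose() and notifies the listener. A compact sketch of that deferred-close pattern with plain atomics (toy names, and with the same benign race window as the original):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

final class DeferredCloseChain {
    enum State { RUNNING, CLOSING, CLOSED, FAILED }

    private final AtomicReference<State> state = new AtomicReference<>(State.RUNNING);
    private final AtomicInteger outstanding = new AtomicInteger();
    private final Runnable onClosed;

    DeferredCloseChain(final Runnable onClosed) {
        this.onClosed = onClosed;
    }

    // Called when a new transaction is submitted on this chain.
    void transactionSubmitted() {
        outstanding.incrementAndGet();
    }

    // Called when a previously submitted transaction completes.
    void transactionCompleted() {
        if (outstanding.decrementAndGet() == 0 && state.get() == State.CLOSING) {
            finishClose();
        }
    }

    // Stop accepting work; the close callback fires once all work has drained.
    void close() {
        if (!state.compareAndSet(State.RUNNING, State.CLOSING)) {
            return;
        }
        if (outstanding.get() == 0) {
            finishClose();
        }
    }

    private void finishClose() {
        state.set(State.CLOSED);
        onClosed.run();
    }
}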
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java
deleted file mode 100644 (file)
index 2f0915d..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Huawei Technologies Co. Ltd. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-public class DOMBrokerWriteOnlyTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
-    /**
-     * Constructs an instance.
-     *
-     * @param identifier identifier of transaction.
-     * @param storeTxFactories the backing transaction store factories
-     */
-    public DOMBrokerWriteOnlyTransaction(Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
-            AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories, commitImpl);
-    }
-
-    @Override
-    protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) {
-        return getTxFactory(key).newWriteOnlyTransaction();
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java
new file mode 100644 (file)
index 0000000..bdea393
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(
+    service = DataBrokerCommitExecutor.class,
+    configurationPid = "org.opendaylight.controller.cluster.datastore.broker")
+@Designate(ocd = DataBrokerCommitExecutor.Config.class)
+public final class DataBrokerCommitExecutor {
+    @ObjectClassDefinition
+    public @interface Config {
+        @AttributeDefinition(name = "max-data-broker-future-callback-queue-size")
+        int callbackQueueSize() default 1000;
+        @AttributeDefinition(name = "max-data-broker-future-callback-pool-size")
+        int callbackPoolSize() default 20;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(DataBrokerCommitExecutor.class);
+
+    private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
+    private final ThreadExecutorStatsMXBeanImpl threadStats;
+    private final CommitStatsMXBeanImpl commitStats;
+    private final ExecutorService executorService;
+
+    @Activate
+    public DataBrokerCommitExecutor(final Config config) {
+        executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(),
+            config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class);
+        threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats",
+            "DOMDataBroker");
+        commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker");
+        commitStats.register();
+        LOG.info("DOM Data Broker commit exector started");
+    }
+
+    @Deactivate
+    void deactivate() {
+        LOG.info("DOM Data Broker commit exector stopping");
+        commitStats.unregister();
+        threadStats.unregister();
+        executorService.shutdown();
+        try {
+            executorService.awaitTermination(1, TimeUnit.MINUTES);
+        } catch (InterruptedException e) {
+            LOG.warn("Future executor failed to finish in time, giving up", e);
+        }
+        LOG.info("DOM Data Broker commit exector stopped");
+    }
+
+    Executor executor() {
+        return executorService;
+    }
+
+    DurationStatisticsTracker commitStatsTracker() {
+        return commitStatsTracker;
+    }
+}
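
The new component exposes only executor() and commitStatsTracker(), so the commit executor, its JMX beans and the commit statistics tracker now have a single owner. A minimal sketch of how a broker component might consume it follows; the class name ExampleDOMDataBroker is hypothetical and the constructor arguments simply mirror the wiring of the OSGiDOMDataBroker being removed next, so the actual consumer in this change may differ:

// Hypothetical consumer sketch: the class name and exact wiring are illustrative, not part of this change.
package org.opendaylight.controller.cluster.databroker;

import com.google.common.collect.ImmutableMap;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.spi.store.DOMStore;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Deactivate;
import org.osgi.service.component.annotations.Reference;

@Component(immediate = true)
public final class ExampleDOMDataBroker {
    private final ConcurrentDOMDataBroker delegate;

    @Activate
    public ExampleDOMDataBroker(@Reference final DataBrokerCommitExecutor commitExecutor,
            @Reference(target = "(type=distributed-config)") final DOMStore configDatastore,
            @Reference(target = "(type=distributed-operational)") final DOMStore operDatastore) {
        // Reuse the shared commit executor and statistics tracker instead of owning them,
        // mirroring the wiring previously performed inside OSGiDOMDataBroker.activate().
        delegate = new ConcurrentDOMDataBroker(ImmutableMap.of(
            LogicalDatastoreType.CONFIGURATION, configDatastore,
            LogicalDatastoreType.OPERATIONAL, operDatastore),
            commitExecutor.executor(), commitExecutor.commitStatsTracker());
    }

    @Deactivate
    void deactivate() {
        delegate.close();
    }
}
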
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/OSGiDOMDataBroker.java
deleted file mode 100644 (file)
index 3395c72..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.yangtools.util.DurationStatisticsTracker;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.osgi.service.component.annotations.Activate;
-import org.osgi.service.component.annotations.Component;
-import org.osgi.service.component.annotations.Deactivate;
-import org.osgi.service.component.annotations.Reference;
-import org.osgi.service.metatype.annotations.AttributeDefinition;
-import org.osgi.service.metatype.annotations.Designate;
-import org.osgi.service.metatype.annotations.ObjectClassDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Beta
-@Component(immediate = true, configurationPid = "org.opendaylight.controller.cluster.datastore.broker",
-    property = "type=default")
-@Designate(ocd = OSGiDOMDataBroker.Config.class)
-public final class OSGiDOMDataBroker implements DOMDataBroker {
-    @ObjectClassDefinition
-    public @interface Config {
-        @AttributeDefinition(name = "max-data-broker-future-callback-queue-size")
-        int callbackQueueSize() default 1000;
-        @AttributeDefinition(name = "max-data-broker-future-callback-pool-size")
-        int callbackPoolSize() default 20;
-    }
-
-    private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMDataBroker.class);
-
-    @Reference(target = "(type=distributed-config)")
-    DOMStore configDatastore = null;
-    @Reference(target = "(type=distributed-operational)")
-    DOMStore operDatastore = null;
-
-    private ExecutorService executorService;
-    private ConcurrentDOMDataBroker delegate;
-    private CommitStatsMXBeanImpl commitStats;
-    private ThreadExecutorStatsMXBeanImpl threadStats;
-
-    @Override
-    public DOMDataTreeReadTransaction newReadOnlyTransaction() {
-        return delegate.newReadOnlyTransaction();
-    }
-
-    @Override
-    public DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
-        return delegate.newWriteOnlyTransaction();
-    }
-
-    @Override
-    public DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
-        return delegate.newReadWriteTransaction();
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
-        return delegate.getExtensions();
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
-        return delegate.createTransactionChain(listener);
-    }
-
-    @Override
-    public DOMTransactionChain createMergingTransactionChain(final DOMTransactionChainListener listener) {
-        return delegate.createMergingTransactionChain(listener);
-    }
-
-    @Activate
-    void activate(final Config config) {
-        LOG.info("DOM Data Broker starting");
-        final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
-
-        executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(),
-            config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class);
-        delegate = new ConcurrentDOMDataBroker(ImmutableMap.of(
-            LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore),
-            executorService, commitStatsTracker);
-
-        commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker");
-        commitStats.register();
-        threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats",
-            "DOMDataBroker");
-
-        LOG.info("DOM Data Broker started");
-    }
-
-    @Deactivate
-    void deactivate() {
-        LOG.info("DOM Data Broker stopping");
-        commitStats.unregister();
-        if (threadStats != null) {
-            threadStats.unregister();
-        }
-
-        delegate.close();
-        executorService.shutdown();
-        try {
-            executorService.awaitTermination(1, TimeUnit.MINUTES);
-        } catch (InterruptedException e) {
-            LOG.warn("Future executor failed to finish in time, giving up", e);
-        }
-        LOG.info("DOM Data Broker stopped");
-    }
-}
index 003c073de8ea12a101e9542cd4dab9259a8ae946..d10627dcf93e065df03c5f5130da50303d9ecc79 100644 (file)
@@ -15,6 +15,7 @@ import com.google.common.base.MoreObjects;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.stream.Stream;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -107,10 +108,16 @@ public abstract class AbstractClientHandle<T extends AbstractProxyTransaction> e
     }
 
     final T ensureProxy(final YangInstanceIdentifier path) {
-        final State<T> local = getState();
-        final Long shard = parent.resolveShardForPath(path);
+        return ensureProxy(getState(), parent.resolveShardForPath(path));
+    }
+
+    private T ensureProxy(final State<T> localState, final Long shard) {
+        return localState.computeIfAbsent(shard, this::createProxy);
+    }
 
-        return local.computeIfAbsent(shard, this::createProxy);
+    final Stream<T> ensureAllProxies() {
+        final var local = getState();
+        return parent.resolveAllShards().map(shard -> ensureProxy(local, shard));
     }
 
     final AbstractClientHistory parent() {
index 95552b382e21e4c25cf63d8db25ab3e9caec99d7..796c23614e2fa220660bb09c6e7db0f84162b8df 100644 (file)
@@ -20,6 +20,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.StampedLock;
+import java.util.stream.Stream;
 import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.checkerframework.checker.lock.qual.Holding;
 import org.eclipse.jdt.annotation.NonNull;
@@ -31,6 +32,7 @@ import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryReq
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
 import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -117,6 +119,14 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         return client.resolveShardForPath(path);
     }
 
+    final Stream<Long> resolveAllShards() {
+        return client.resolveAllShards();
+    }
+
+    final ActorUtils actorUtils() {
+        return client.actorUtils();
+    }
+
     @Override
     final void localAbort(final Throwable cause) {
         final State oldState = STATE_UPDATER.getAndSet(this, State.CLOSED);
index e5d8abcb629826b4a505b3eef08136052bc57ec8..507f065d49cfe370d589326bdb92bbc37a477844 100644 (file)
@@ -12,6 +12,7 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.util.Timeout;
+import com.google.common.base.Throwables;
 import java.util.concurrent.TimeUnit;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
@@ -53,10 +54,9 @@ public abstract class AbstractDataStoreClientActor extends AbstractClientActor {
         try {
             return (DataStoreClient) Await.result(ExplicitAsk.ask(actor, GET_CLIENT_FACTORY,
                 Timeout.apply(timeout, unit)), Duration.Inf());
-        } catch (RuntimeException e) {
-            throw e;
         } catch (Exception e) {
-            throw new RuntimeException(e);
+            Throwables.throwIfUnchecked(e);
+            throw new IllegalStateException(e);
         }
     }
 }
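
The replacement adopts the usual Guava idiom: let unchecked exceptions propagate untouched and wrap checked ones exactly once in an IllegalStateException. In isolation the pattern looks roughly like this (illustrative helper only, not part of the change):

import com.google.common.base.Throwables;
import java.util.concurrent.Callable;

final class UncheckedCall {
    private UncheckedCall() {
        // utility class
    }

    static <T> T call(final Callable<T> task) {
        try {
            return task.call();
        } catch (Exception e) {
            // RuntimeExceptions propagate unchanged, checked exceptions are wrapped exactly once
            Throwables.throwIfUnchecked(e);
            throw new IllegalStateException(e);
        }
    }
}
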
index 4f91cb27fae151a26ba5c49dbd8ba12498dc8238..82567c40d930c2300407eb710cbe1d40b89fa022 100644 (file)
@@ -17,13 +17,14 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.StampedLock;
-import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
 import org.opendaylight.controller.cluster.access.client.ReconnectForwarder;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,7 +72,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     private volatile Throwable aborted;
 
     AbstractDataStoreClientBehavior(final ClientActorContext context,
-            final BackendInfoResolver<ShardBackendInfo> resolver) {
+            final AbstractShardBackendResolver resolver) {
         super(context, resolver);
         singleHistory = new SingleClientHistory(this, new LocalHistoryIdentifier(getIdentifier(), 0));
     }
@@ -194,7 +195,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
         try {
             if (aborted != null) {
                 Throwables.throwIfUnchecked(aborted);
-                throw new RuntimeException(aborted);
+                throw new IllegalStateException(aborted);
             }
 
             final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
@@ -224,4 +225,10 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     }
 
     abstract Long resolveShardForPath(YangInstanceIdentifier path);
+
+    abstract Stream<Long> resolveAllShards();
+
+    final ActorUtils actorUtils() {
+        return ((AbstractShardBackendResolver) resolver()).actorUtils();
+    }
 }
index 8fb042fba10a69222ca7219b9b258ef51cc9e664..14ad54699161a60719ea846d353c46e73fb4adea 100644 (file)
@@ -49,6 +49,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
@@ -64,10 +65,9 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class interacts with the queueing mechanism in ClientActorBehavior, hence once we arrive at a decision
  * to use either a local or remote implementation, we are stuck with it. We can re-evaluate on the next transaction.
- *
- * @author Robert Varga
  */
-abstract class AbstractProxyTransaction implements Identifiable<TransactionIdentifier> {
+abstract sealed class AbstractProxyTransaction implements Identifiable<TransactionIdentifier>
+        permits LocalProxyTransaction, RemoteProxyTransaction {
     /**
      * Marker object used instead of read-type of requests, which are satisfied only once. This has a lower footprint
      * and allows compressing multiple requests into a single entry. This class is not thread-safe.
@@ -139,7 +139,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
                 latch.await();
             } catch (InterruptedException e) {
                 LOG.warn("Interrupted while waiting for latch of {}", successor);
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
             return successor;
         }
@@ -157,7 +157,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
                     this.prevState);
             this.prevState = requireNonNull(prevState);
             // We cannot have duplicate successor states, so this check is sufficient
-            this.done = DONE.equals(prevState);
+            done = DONE.equals(prevState);
         }
 
         // To be called from safe contexts, where successor is known to be completed
@@ -354,7 +354,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         // Propagate state and seal the successor.
         final Optional<ModifyTransactionRequest> optState = flushState();
         if (optState.isPresent()) {
-            forwardToSuccessor(successor, optState.get(), null);
+            forwardToSuccessor(successor, optState.orElseThrow(), null);
         }
         successor.predecessorSealed();
     }
@@ -448,7 +448,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         });
     }
 
-    final void abort(final VotingFuture<Void> ret) {
+    final void abort(final VotingFuture<Empty> ret) {
         checkSealed();
 
         sendDoAbort(t -> {
@@ -733,7 +733,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
             final long enqueuedTicks = parent.currentTime();
             final Optional<ModifyTransactionRequest> optState = flushState();
             if (optState.isPresent()) {
-                successor.handleReplayedRemoteRequest(optState.get(), null, enqueuedTicks);
+                successor.handleReplayedRemoteRequest(optState.orElseThrow(), null, enqueuedTicks);
             }
             if (successor.markSealed()) {
                 successor.sealAndSend(OptionalLong.of(enqueuedTicks));
@@ -854,7 +854,11 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
     abstract void handleReplayedRemoteRequest(TransactionRequest<?> request,
             @Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
 
-    private static IllegalStateException unhandledResponseException(final Response<?, ?> resp) {
+    static final @NonNull IllegalArgumentException unhandledRequest(final TransactionRequest<?> request) {
+        return new IllegalArgumentException("Unhandled request " + request);
+    }
+
+    private static @NonNull IllegalStateException unhandledResponseException(final Response<?, ?> resp) {
         return new IllegalStateException("Unhandled response " + resp.getClass());
     }
 
index ca784fed7a73783e81e89812f9a06412e2197552..5a436a53d3b2978ac3c1d4585220d3a2fbf44a58 100644 (file)
@@ -74,7 +74,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
 
         private synchronized void onStageResolved(final ShardBackendInfo info, final Throwable failure) {
             if (failure == null) {
-                this.result = requireNonNull(info);
+                result = requireNonNull(info);
             } else {
                 LOG.warn("Failed to resolve shard", failure);
             }
@@ -97,7 +97,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
     AbstractShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
         this.actorUtils = requireNonNull(actorUtils);
-        this.connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.BORON,
+        connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.POTASSIUM,
             ABIVersion.current()));
     }
 
@@ -107,7 +107,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
         return () -> staleBackendInfoCallbacks.remove(callback);
     }
 
-    protected void notifyStaleBackendInfoCallbacks(Long cookie) {
+    protected void notifyStaleBackendInfoCallbacks(final Long cookie) {
         staleBackendInfoCallbacks.forEach(callback -> callback.accept(cookie));
     }
 
index b2f66d5d31b1952c639c6cc3b24d34bcd5d0b1be..77de1e45d82bd0e8f797b4152830cf8aa9cda949 100644 (file)
@@ -14,6 +14,7 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * Base class for internal {@link DOMStoreThreePhaseCommitCohort} implementation. It contains utility constants for
@@ -23,7 +24,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
  */
 abstract class AbstractTransactionCommitCohort implements DOMStoreThreePhaseCommitCohort {
     static final ListenableFuture<Boolean> TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE);
-    static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
+    static final ListenableFuture<Empty> EMPTY_FUTURE = Futures.immediateFuture(Empty.value());
 
     private final AbstractClientHistory parent;
     private final TransactionIdentifier txId;
index d20a618c3df0c2d97feb8fbf951d6168a00cd5cb..5611a1044f1446452ab8eeedbf292ca9c8384dbd 100644 (file)
@@ -11,6 +11,7 @@ import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
@@ -28,19 +29,20 @@ public class ClientSnapshot extends AbstractClientHandle<AbstractProxyTransactio
     }
 
     public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return ensureSnapshotProxy(path).exists(path);
+        return ensureProxy(path).exists(path);
     }
 
     public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
-        return ensureSnapshotProxy(path).read(path);
+        return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
+    }
+
+    private FluentFuture<Optional<NormalizedNode>> readRoot() {
+        return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+            .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
     }
 
     @Override
     final AbstractProxyTransaction createProxy(final Long shard) {
         return parent().createSnapshotProxy(getIdentifier(), shard);
     }
-
-    private AbstractProxyTransaction ensureSnapshotProxy(final YangInstanceIdentifier path) {
-        return ensureProxy(path);
-    }
 }
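
Root reads (an empty YangInstanceIdentifier) are now fanned out to every shard via ensureAllProxies() and the per-shard futures are combined by RootScatterGather.gather(). The sketch below shows the general shape of such a gather step with deliberately simplified types; the real RootScatterGather merges NormalizedNode results and its actual signature is not reproduced here:

import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

final class GatherSketch {
    private GatherSketch() {
        // utility class
    }

    // Combine one optional result per shard into a single optional aggregate:
    // absent only if every shard reported absent.
    static <T> FluentFuture<Optional<List<T>>> gather(final Stream<FluentFuture<Optional<T>>> shardReads) {
        final List<FluentFuture<Optional<T>>> futures = shardReads.collect(Collectors.toList());
        return FluentFuture.from(Futures.allAsList(futures)).transform(results -> {
            final List<T> present = results.stream().flatMap(Optional::stream).collect(Collectors.toList());
            return present.isEmpty() ? Optional.<List<T>>empty() : Optional.of(present);
        }, MoreExecutors.directExecutor());
    }
}
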
index 7cdc04aba17b471ea218e3e8060141b9a8a2451e..b2ff5d5184d58753fa76bf90df457abfe6e685f0 100644 (file)
@@ -14,9 +14,13 @@ import com.google.common.util.concurrent.FluentFuture;
 import java.util.Collection;
 import java.util.Map;
 import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
@@ -55,28 +59,57 @@ public class ClientTransaction extends AbstractClientHandle<AbstractProxyTransac
         super(parent, transactionId);
     }
 
-    private AbstractProxyTransaction ensureTransactionProxy(final YangInstanceIdentifier path) {
-        return ensureProxy(path);
-    }
-
     public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return ensureTransactionProxy(path).exists(path);
+        return ensureProxy(path).exists(path);
     }
 
     public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
-        return ensureTransactionProxy(path).read(path);
+        return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
+    }
+
+    private FluentFuture<Optional<NormalizedNode>> readRoot() {
+        return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+            .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
     }
 
     public void delete(final YangInstanceIdentifier path) {
-        ensureTransactionProxy(path).delete(path);
+        if (path.isEmpty()) {
+            ensureAllProxies().forEach(proxy -> proxy.delete(YangInstanceIdentifier.of()));
+        } else {
+            ensureProxy(path).delete(path);
+        }
     }
 
     public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
-        ensureTransactionProxy(path).merge(path, data);
+        if (path.isEmpty()) {
+            mergeRoot(RootScatterGather.castRootNode(data));
+        } else {
+            ensureProxy(path).merge(path, data);
+        }
+    }
+
+    private void mergeRoot(final @NonNull ContainerNode rootData) {
+        if (!rootData.isEmpty()) {
+            RootScatterGather.scatterTouched(rootData, this::ensureProxy).forEach(
+                scattered -> scattered.shard().merge(YangInstanceIdentifier.of(), scattered.container()));
+        }
     }
 
     public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
-        ensureTransactionProxy(path).write(path, data);
+        if (path.isEmpty()) {
+            writeRoot(RootScatterGather.castRootNode(data));
+        } else {
+            ensureProxy(path).write(path, data);
+        }
+    }
+
+    private void writeRoot(final @NonNull ContainerNode rootData) {
+        RootScatterGather.scatterAll(rootData, this::ensureProxy, ensureAllProxies()).forEach(
+            scattered -> scattered.shard().write(YangInstanceIdentifier.of(), scattered.container()));
+    }
+
+    private AbstractProxyTransaction ensureProxy(final PathArgument childId) {
+        return ensureProxy(YangInstanceIdentifier.of(childId));
     }
 
     public DOMStoreThreePhaseCommitCohort ready() {
@@ -90,19 +123,11 @@ public class ClientTransaction extends AbstractClientHandle<AbstractProxyTransac
         final AbstractClientHistory parent = parent();
         parent.onTransactionShardsBound(txId, participants.keySet());
 
-        final AbstractTransactionCommitCohort cohort;
-        switch (toReady.size()) {
-            case 0:
-                cohort = new EmptyTransactionCommitCohort(parent, txId);
-                break;
-            case 1:
-                cohort = new DirectTransactionCommitCohort(parent, txId, toReady.iterator().next());
-                break;
-            default:
-                cohort = new ClientTransactionCommitCohort(parent, txId, toReady);
-                break;
-        }
-
+        final AbstractTransactionCommitCohort cohort = switch (toReady.size()) {
+            case 0 -> new EmptyTransactionCommitCohort(parent, txId);
+            case 1 -> new DirectTransactionCommitCohort(parent, txId, toReady.iterator().next());
+            default -> new ClientTransactionCommitCohort(parent, txId, toReady);
+        };
         return parent.onTransactionReady(this, cohort);
     }
 
index a4eb5e074f421ffc3e8bb718f02e747540f3839a..7887577a939bc3abf2c5d84d24e1dfe320f75299 100644 (file)
@@ -12,6 +12,8 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.Collection;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohort {
     private final Collection<AbstractProxyTransaction> proxies;
@@ -35,14 +37,14 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
         return ret;
     }
 
-    private ListenableFuture<Void> addComplete(final ListenableFuture<Void> future) {
+    private <T> ListenableFuture<T> addComplete(final ListenableFuture<T> future) {
         future.addListener(this::complete, MoreExecutors.directExecutor());
         return future;
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<Empty> preCommit() {
+        final var ret = new VotingFuture<>(Empty.value(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.preCommit(ret);
         }
@@ -51,8 +53,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<CommitInfo> commit() {
+        final var ret = new VotingFuture<>(CommitInfo.empty(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.doCommit(ret);
         }
@@ -61,8 +63,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<Empty> abort() {
+        final var ret = new VotingFuture<>(Empty.value(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.abort(ret);
         }
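
Each vote-collecting method seeds a VotingFuture with the value to report on success (Empty.value() or CommitInfo.empty()) and the number of participating proxies, then hands that future to every proxy. A stripped-down illustration of the aggregation idea follows; voteYes()/voteNo() are assumed names and the real VotingFuture also accumulates individual failures rather than failing on the first one:

import com.google.common.util.concurrent.AbstractFuture;
import java.util.concurrent.atomic.AtomicInteger;

// Completes with a pre-set result once all voters have reported success, or fails on the first error.
final class VotingFutureSketch<T> extends AbstractFuture<T> {
    private final AtomicInteger remainingVotes;
    private final T successResult;

    VotingFutureSketch(final T successResult, final int voterCount) {
        this.successResult = successResult;
        remainingVotes = new AtomicInteger(voterCount);
    }

    void voteYes() {
        if (remainingVotes.decrementAndGet() == 0) {
            set(successResult);
        }
    }

    void voteNo(final Throwable cause) {
        setException(cause);
    }
}
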
index 9b21b98682bf71a7e807985dba4f0f5c7e586832..5b5ff5864a3f8a50bffa650fce2db2a04e7f4fdd 100644 (file)
@@ -11,6 +11,8 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * An {@link AbstractTransactionCommitCohort} implementation for transactions which contain a single proxy. Since there
@@ -33,19 +35,19 @@ final class DirectTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        return VOID_FUTURE;
+    public ListenableFuture<Empty> preCommit() {
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         complete();
-        return VOID_FUTURE;
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<CommitInfo> commit() {
         complete();
-        return VOID_FUTURE;
+        return CommitInfo.emptyFluentFuture();
     }
 }
index e40da21d138c2c5bde8876bae053bdf3f96db03b..f8927c28c859f0eab65c00efccd7a8a304baf109 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import java.util.function.Function;
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -18,12 +18,12 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * @author Robert Varga
  */
 final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
-    private final Function<YangInstanceIdentifier, Long> pathToShard;
+    private final ModuleShardBackendResolver resolver;
 
     private DistributedDataStoreClientBehavior(final ClientActorContext context,
             final ModuleShardBackendResolver resolver) {
         super(context, resolver);
-        pathToShard = resolver::resolveShardForPath;
+        this.resolver = resolver;
     }
 
     DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
@@ -32,7 +32,12 @@ final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBe
 
     @Override
     Long resolveShardForPath(final YangInstanceIdentifier path) {
-        return pathToShard.apply(path);
+        return resolver.resolveShardForPath(path);
+    }
+
+    @Override
+    Stream<Long> resolveAllShards() {
+        return resolver.resolveAllShards();
     }
 
     @Override
index 7193dd053f762cb37c4cf701afe1efd89fb33919..5b11d8679e31ba9801a10638228f0bb58504ee15 100644 (file)
@@ -9,6 +9,8 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * An {@link AbstractTransactionCommitCohort} for use with empty transactions. This relies on the fact that no backends
@@ -30,19 +32,19 @@ final class EmptyTransactionCommitCohort extends AbstractTransactionCommitCohort
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        return VOID_FUTURE;
+    public ListenableFuture<Empty> preCommit() {
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         complete();
-        return VOID_FUTURE;
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<CommitInfo> commit() {
         complete();
-        return VOID_FUTURE;
+        return CommitInfo.emptyFluentFuture();
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java
new file mode 100644 (file)
index 0000000..63dbc92
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+
+/**
+ * A {@link CursorAwareDataTreeModification} which does not really do anything and throws a
+ * {@link FailedDataTreeModificationException} for most of its operations. Used when
+ * {@link DataTreeSnapshot#newModification()} fails, see {@link LocalReadWriteProxyTransaction} for details. Surrounding
+ * code should guard against invocation of most of these methods.
+ */
+record FailedDataTreeModification(
+        @NonNull EffectiveModelContext modelContext,
+        @NonNull Exception cause) implements CursorAwareDataTreeModification {
+
+    FailedDataTreeModification {
+        requireNonNull(modelContext);
+        requireNonNull(cause);
+    }
+
+    @Override
+    public void delete(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    @Override
+    public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+        throw ex();
+    }
+
+    @Override
+    public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+        throw ex();
+    }
+
+    @Override
+    public void ready() {
+        // No-op
+    }
+
+    @Override
+    public void applyToCursor(final DataTreeModificationCursor cursor) {
+        throw ex();
+    }
+
+    @Override
+    public Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    @Override
+    public CursorAwareDataTreeModification newModification() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Optional<? extends DataTreeModificationCursor> openCursor(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    private @NonNull FailedDataTreeModificationException ex() {
+        return new FailedDataTreeModificationException(cause);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java
new file mode 100644 (file)
index 0000000..5f860a0
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A boxing {@link RuntimeException} thrown by {@link FailedDataTreeModification} from its user-facing methods.
+ */
+final class FailedDataTreeModificationException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+
+    FailedDataTreeModificationException(final Exception cause) {
+        super(null, requireNonNull(cause), false, false);
+    }
+}
index 0a5ead9d9bca1c049c98c3665caeeac5f3ee9a6a..6c4006e93f0a2197937ce7ff90b3b7c5a3e8b00a 100644 (file)
@@ -26,14 +26,16 @@ import org.opendaylight.controller.cluster.access.commands.ReadTransactionSucces
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,13 +51,12 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class is not thread-safe as usual with transactions. Since it does not interact with the backend until the
  * transaction is submitted, at which point this class gets out of the picture, this is not a cause for concern.
- *
- * @author Robert Varga
  */
-abstract class LocalProxyTransaction extends AbstractProxyTransaction {
+abstract sealed class LocalProxyTransaction extends AbstractProxyTransaction
+        permits LocalReadOnlyProxyTransaction, LocalReadWriteProxyTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(LocalProxyTransaction.class);
 
-    private final TransactionIdentifier identifier;
+    private final @NonNull TransactionIdentifier identifier;
 
     LocalProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier, final boolean isDone) {
         super(parent, isDone);
@@ -76,13 +77,25 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
             @Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
 
     @Override
-    final FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
-        return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path).isPresent());
+    FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+        final boolean result;
+        try {
+            result = readOnlyView().readNode(path).isPresent();
+        } catch (FailedDataTreeModificationException e) {
+            return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+        }
+        return FluentFutures.immediateBooleanFluentFuture(result);
     }
 
     @Override
-    final FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
-        return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path));
+    FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+        final Optional<NormalizedNode> result;
+        try {
+            result = readOnlyView().readNode(path);
+        } catch (FailedDataTreeModificationException e) {
+            return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+        }
+        return FluentFutures.immediateFluentFuture(result);
     }
 
     @Override
@@ -96,35 +109,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         if (request instanceof AbortLocalTransactionRequest) {
             enqueueAbort(request, callback, enqueuedTicks);
         } else {
-            throw new IllegalArgumentException("Unhandled request" + request);
-        }
-    }
-
-    private boolean handleReadRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
-        // Note we delay completion of read requests to limit the scope at which the client can run, as they have
-        // listeners, which we do not want to execute while we are reconnecting.
-        if (request instanceof ReadTransactionRequest) {
-            final YangInstanceIdentifier path = ((ReadTransactionRequest) request).getPath();
-            final Optional<NormalizedNode> result = readOnlyView().readNode(path);
-            if (callback != null) {
-                // XXX: FB does not see that callback is final, on stack and has be check for non-null.
-                final Consumer<Response<?, ?>> fbIsStupid = requireNonNull(callback);
-                executeInActor(() -> fbIsStupid.accept(new ReadTransactionSuccess(request.getTarget(),
-                    request.getSequence(), result)));
-            }
-            return true;
-        } else if (request instanceof ExistsTransactionRequest) {
-            final YangInstanceIdentifier path = ((ExistsTransactionRequest) request).getPath();
-            final boolean result = readOnlyView().readNode(path).isPresent();
-            if (callback != null) {
-                // XXX: FB does not see that callback is final, on stack and has be check for non-null.
-                final Consumer<Response<?, ?>> fbIsStupid = requireNonNull(callback);
-                executeInActor(() -> fbIsStupid.accept(new ExistsTransactionSuccess(request.getTarget(),
-                    request.getSequence(), result)));
-            }
-            return true;
-        } else {
-            return false;
+            throw unhandledRequest(request);
         }
     }
 
@@ -142,7 +127,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
             // hence we can skip sequence increments.
             LOG.debug("Not replaying {}", request);
         } else {
-            throw new IllegalArgumentException("Unhandled request " + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -162,15 +147,56 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback);
         } else {
-            throw new IllegalArgumentException("Unhandled request " + request);
+            throw unhandledRequest(request);
+        }
+    }
+
+    @NonNull Response<?, ?> handleExistsRequest(final @NonNull DataTreeSnapshot snapshot,
+            final @NonNull ExistsTransactionRequest request) {
+        try {
+            return new ExistsTransactionSuccess(request.getTarget(), request.getSequence(),
+                snapshot.readNode(request.getPath()).isPresent());
+        } catch (FailedDataTreeModificationException e) {
+            return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+                ReadFailedException.MAPPER.apply(e)));
+        }
+    }
+
+    @NonNull Response<?, ?> handleReadRequest(final @NonNull DataTreeSnapshot snapshot,
+            final @NonNull ReadTransactionRequest request) {
+        try {
+            return new ReadTransactionSuccess(request.getTarget(), request.getSequence(),
+                snapshot.readNode(request.getPath()));
+        } catch (FailedDataTreeModificationException e) {
+            return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+                ReadFailedException.MAPPER.apply(e)));
+        }
+    }
+
+    private boolean handleReadRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
+        // Note we delay completion of read requests to limit the scope at which the client can run, as they have
+        // listeners, which we do not want to execute while we are reconnecting.
+        if (request instanceof ReadTransactionRequest) {
+            if (callback != null) {
+                final var response = handleReadRequest(readOnlyView(), (ReadTransactionRequest) request);
+                executeInActor(() -> callback.accept(response));
+            }
+            return true;
+        } else if (request instanceof ExistsTransactionRequest) {
+            if (callback != null) {
+                final var response = handleExistsRequest(readOnlyView(), (ExistsTransactionRequest) request);
+                executeInActor(() -> callback.accept(response));
+            }
+            return true;
+        } else {
+            return false;
         }
     }
 
     @Override
     final void forwardToRemote(final RemoteProxyTransaction successor, final TransactionRequest<?> request,
                          final Consumer<Response<?, ?>> callback) {
-        if (request instanceof CommitLocalTransactionRequest) {
-            final CommitLocalTransactionRequest req = (CommitLocalTransactionRequest) request;
+        if (request instanceof final CommitLocalTransactionRequest req) {
             final DataTreeModification mod = req.getModification();
 
             LOG.debug("Applying modification {} to successor {}", mod, successor);
@@ -203,7 +229,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof ModifyTransactionRequest) {
             successor.handleForwardedRequest(request, callback);
         } else {
-            throwUnhandledRequest(request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -215,16 +241,12 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             successor.enqueuePurge(callback);
         } else {
-            throwUnhandledRequest(request);
+            throw unhandledRequest(request);
         }
 
         LOG.debug("Forwarded request {} to successor {}", request, successor);
     }
 
-    private static void throwUnhandledRequest(final TransactionRequest<?> request) {
-        throw new IllegalArgumentException("Unhandled request " + request);
-    }
-
     void sendAbort(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
         sendRequest(request, callback);
     }
index 8cdbcf878ca55ec904ea84953ea39fc717ef322b..6d019af1a19383b460ff95524a02b501cb12aa30 100644 (file)
@@ -7,8 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import java.util.Optional;
@@ -20,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 /**
  * A read-only specialization of {@link LocalProxyTransaction}. This class is NOT thread-safe.
@@ -39,7 +39,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
     LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
         super(parent, identifier, true);
         // It is an error to touch snapshot once we are DONE
-        this.snapshot = null;
+        snapshot = null;
     }
 
     @Override
@@ -49,7 +49,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
 
     @Override
     DataTreeSnapshot readOnlyView() {
-        return checkNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
+        return verifyNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
     }
 
     @Override
@@ -95,7 +95,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
     private static void commonModifyTransactionRequest(final ModifyTransactionRequest request) {
         verify(request.getModifications().isEmpty());
 
-        final PersistenceProtocol protocol = request.getPersistenceProtocol().get();
+        final PersistenceProtocol protocol = request.getPersistenceProtocol().orElseThrow();
         verify(protocol == PersistenceProtocol.ABORT);
     }
 }
index c58f834dd69fc452f65729cacddc5d75b30bc2ba..47ae6a2bc7a27ae60fc1a8b094411c65c03d4a21 100644 (file)
@@ -7,8 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.util.concurrent.FluentFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Optional;
 import java.util.OptionalLong;
 import java.util.function.BiConsumer;
@@ -19,9 +23,11 @@ import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
+import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
 import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
+import org.opendaylight.controller.cluster.access.commands.ReadTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionDelete;
 import org.opendaylight.controller.cluster.access.commands.TransactionDoCommitRequest;
@@ -31,16 +37,19 @@ import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitR
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
+import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,10 +92,25 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
      */
     private Exception recordedFailure;
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier,
-        final DataTreeSnapshot snapshot) {
+            final DataTreeSnapshot snapshot) {
         super(parent, identifier, false);
-        modification = (CursorAwareDataTreeModification) snapshot.newModification();
+
+        if (snapshot instanceof FailedDataTreeModification failed) {
+            recordedFailure = failed.cause();
+            modification = failed;
+        } else {
+            CursorAwareDataTreeModification mod;
+            try {
+                mod = (CursorAwareDataTreeModification) snapshot.newModification();
+            } catch (Exception e) {
+                LOG.debug("Failed to instantiate modification for {}", identifier, e);
+                recordedFailure = e;
+                mod = new FailedDataTreeModification(snapshot.modelContext(), e);
+            }
+            modification = mod;
+        }
     }
 
     LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
@@ -105,6 +129,20 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         return getModification();
     }
 
+    @Override
+    FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+        final var ex = recordedFailure;
+        return ex == null ? super.doExists(path)
+            : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+    }
+
+    @Override
+    FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+        final var ex = recordedFailure;
+        return ex == null ? super.doRead(path)
+            : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+    }
+
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
     void doDelete(final YangInstanceIdentifier path) {
@@ -177,7 +215,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
     }
 
     private void sealModification() {
-        Preconditions.checkState(sealedModification == null, "Transaction %s is already sealed", this);
+        checkState(sealedModification == null, "Transaction %s is already sealed", this);
         final CursorAwareDataTreeModification mod = getModification();
         mod.ready();
         sealedModification = mod;
@@ -220,8 +258,8 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         return Optional.of(b.build());
     }
 
-    DataTreeSnapshot getSnapshot() {
-        Preconditions.checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
+    CursorAwareDataTreeSnapshot getSnapshot() {
+        checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
         return sealedModification;
     }
 
@@ -254,26 +292,26 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
         final Optional<PersistenceProtocol> maybeProtocol = request.getPersistenceProtocol();
         if (maybeProtocol.isPresent()) {
-            Verify.verify(callback != null, "Request %s has null callback", request);
+            final var cb = verifyNotNull(callback, "Request %s has null callback", request);
             if (markSealed()) {
                 sealOnly();
             }
 
-            switch (maybeProtocol.get()) {
+            switch (maybeProtocol.orElseThrow()) {
                 case ABORT:
-                    sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), callback);
+                    sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), cb);
                     break;
                 case READY:
                     // No-op, as we have already issued a sealOnly() and we are not transmitting anything
                     break;
                 case SIMPLE:
-                    sendMethod.accept(commitRequest(false), callback);
+                    sendMethod.accept(commitRequest(false), cb);
                     break;
                 case THREE_PHASE:
-                    sendMethod.accept(commitRequest(true), callback);
+                    sendMethod.accept(commitRequest(true), cb);
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.orElseThrow());
             }
         }
     }
@@ -321,6 +359,22 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         }
     }
 
+    @Override
+    Response<?, ?> handleExistsRequest(final DataTreeSnapshot snapshot, final ExistsTransactionRequest request) {
+        final var ex = recordedFailure;
+        return ex == null ? super.handleExistsRequest(snapshot, request)
+            : request.toRequestFailure(
+                new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+    }
+
+    @Override
+    Response<?, ?> handleReadRequest(final DataTreeSnapshot snapshot, final ReadTransactionRequest request) {
+        final var ex = recordedFailure;
+        return ex == null ? super.handleReadRequest(snapshot, request)
+            : request.toRequestFailure(
+                new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+    }
+
     @Override
     void forwardToLocal(final LocalProxyTransaction successor, final TransactionRequest<?> request,
             final Consumer<Response<?, ?>> callback) {
@@ -336,7 +390,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
     }
 
     private static LocalReadWriteProxyTransaction verifyLocalReadWrite(final LocalProxyTransaction successor) {
-        Verify.verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor);
+        verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor);
         return (LocalReadWriteProxyTransaction) successor;
     }
 
@@ -353,12 +407,12 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         closedException = this::abortedException;
     }
 
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of recorded failure")
     private @NonNull CursorAwareDataTreeModification getModification() {
         if (closedException != null) {
             throw closedException.get();
         }
-
-        return Preconditions.checkNotNull(modification, "Transaction %s is DONE", getIdentifier());
+        return verifyNotNull(modification, "Transaction %s is DONE", getIdentifier());
     }
 
     private void sendRebased(final CommitLocalTransactionRequest request, final Consumer<Response<?, ?>> callback) {
@@ -369,8 +423,18 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         // Rebase old modification on new data tree.
         final CursorAwareDataTreeModification mod = getModification();
 
-        try (DataTreeModificationCursor cursor = mod.openCursor()) {
-            request.getModification().applyToCursor(cursor);
+        if (!(mod instanceof FailedDataTreeModification)) {
+            request.getDelayedFailure().ifPresentOrElse(failure -> {
+                if (recordedFailure == null) {
+                    recordedFailure = failure;
+                } else {
+                    recordedFailure.addSuppressed(failure);
+                }
+            }, () -> {
+                try (DataTreeModificationCursor cursor = mod.openCursor()) {
+                    request.getModification().applyToCursor(cursor);
+                }
+            });
         }
 
         if (markSealed()) {
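
The LocalReadWriteProxyTransaction changes above follow a single pattern: the first failure hit while building or applying a modification is recorded, later failures are attached to it as suppressed exceptions, and every subsequent read or exists call is answered with an immediately failed future built from that cause. A minimal sketch of the pattern, using plain JDK futures instead of the FluentFuture and ReadFailedException.MAPPER machinery in the diff; the class and method names here are illustrative only:

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;

    final class FailureRecordingTransaction {
        // First failure wins; later failures are only attached as suppressed.
        private Exception recordedFailure;

        void recordFailure(final Exception failure) {
            if (recordedFailure == null) {
                recordedFailure = failure;
            } else {
                recordedFailure.addSuppressed(failure);
            }
        }

        // Reads replay the recorded failure instead of touching the broken modification.
        CompletableFuture<Optional<String>> read(final String path) {
            final var ex = recordedFailure;
            return ex == null
                ? CompletableFuture.completedFuture(Optional.of("data at " + path))
                : CompletableFuture.failedFuture(ex);
        }
    }

Keeping a single recorded exception means replays and retries all surface the same root cause rather than a chain of secondary errors.
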
index 74aca03e8686b20d43e57a9550722fcf96eca8c3..6ab566e2fafc186fb6257ebd98606e3f36c9470c 100644 (file)
@@ -7,20 +7,20 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static akka.pattern.Patterns.ask;
 import static com.google.common.base.Verify.verifyNotNull;
 
 import akka.dispatch.ExecutionContexts;
 import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.collect.BiMap;
 import com.google.common.collect.ImmutableBiMap;
-import com.google.common.collect.ImmutableBiMap.Builder;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
 import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges;
@@ -52,19 +52,20 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     @GuardedBy("this")
     private long nextShard = 1;
 
-    private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
+    private volatile ImmutableBiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
 
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
     ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
         super(clientId, actorUtils);
 
-        shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges(
-            this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES))
+        shardAvailabilityChangesRegFuture = Patterns.ask(actorUtils.getShardManager(),
+            new RegisterForShardAvailabilityChanges(this::onShardAvailabilityChange),
+            Timeout.apply(60, TimeUnit.MINUTES))
                 .map(reply -> (Registration)reply, ExecutionContexts.global());
 
         shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
             @Override
-            public void onComplete(Throwable failure, Registration reply) {
+            public void onComplete(final Throwable failure, final Registration reply) {
                 if (failure != null) {
                     LOG.error("RegisterForShardAvailabilityChanges failed", failure);
                 }
@@ -72,7 +73,7 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
         }, ExecutionContexts.global());
     }
 
-    private void onShardAvailabilityChange(String shardName) {
+    private void onShardAvailabilityChange(final String shardName) {
         LOG.debug("onShardAvailabilityChange for {}", shardName);
 
         Long cookie = shards.get(shardName);
@@ -85,22 +86,26 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     }
 
     Long resolveShardForPath(final YangInstanceIdentifier path) {
-        final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
+        return resolveCookie(actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path));
+    }
+
+    Stream<Long> resolveAllShards() {
+        return actorUtils().getConfiguration().getAllShardNames().stream()
+            .sorted()
+            .map(this::resolveCookie);
+    }
+
+    private @NonNull Long resolveCookie(final String shardName) {
+        final Long cookie = shards.get(shardName);
+        return cookie != null ? cookie : populateShard(shardName);
+    }
+
+    private synchronized @NonNull Long populateShard(final String shardName) {
         Long cookie = shards.get(shardName);
         if (cookie == null) {
-            synchronized (this) {
-                cookie = shards.get(shardName);
-                if (cookie == null) {
-                    cookie = nextShard++;
-
-                    Builder<String, Long> builder = ImmutableBiMap.builder();
-                    builder.putAll(shards);
-                    builder.put(shardName, cookie);
-                    shards = builder.build();
-                }
-            }
+            cookie = nextShard++;
+            shards = ImmutableBiMap.<String, Long>builder().putAll(shards).put(shardName, cookie).build();
         }
-
         return cookie;
     }
 
@@ -174,14 +179,14 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     public void close() {
         shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
             @Override
-            public void onComplete(Throwable failure, Registration reply) {
+            public void onComplete(final Throwable failure, final Registration reply) {
                 reply.close();
             }
         }, ExecutionContexts.global());
     }
 
     @Override
-    public String resolveCookieName(Long cookie) {
+    public String resolveCookieName(final Long cookie) {
         return verifyNotNull(shards.inverse().get(cookie), "Unexpected null cookie: %s", cookie);
     }
 }
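
The ModuleShardBackendResolver hunk drops hand-rolled double-checked locking in favour of an unsynchronized read of a volatile ImmutableBiMap plus a synchronized slow path that re-checks and publishes a fresh copy. A reduced sketch of that copy-on-write idiom, with illustrative shard and cookie names:

    import com.google.common.collect.ImmutableBiMap;

    final class CookieAllocator {
        // Readers see a consistent snapshot without taking a lock.
        private volatile ImmutableBiMap<String, Long> shards = ImmutableBiMap.of("default", 0L);
        private long nextShard = 1;  // guarded by "this"

        Long resolveCookie(final String shardName) {
            final Long cookie = shards.get(shardName);
            return cookie != null ? cookie : populateShard(shardName);
        }

        private synchronized Long populateShard(final String shardName) {
            // Re-check under the lock: another thread may have published the shard already.
            Long cookie = shards.get(shardName);
            if (cookie == null) {
                cookie = nextShard++;
                shards = ImmutableBiMap.<String, Long>builder().putAll(shards).put(shardName, cookie).build();
            }
            return cookie;
        }
    }

Readers never block; writers pay the cost of rebuilding the map, which is acceptable because new shards are introduced rarely.
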
index 70b5960a056d943ef8f927094e3b433fb85f3021..437effe9ae0df3264d65db37b6b2a4bf60377cf0 100644 (file)
@@ -46,8 +46,8 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -373,7 +373,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
     static ProxyHistory createClient(final AbstractClientHistory parent,
             final AbstractClientConnection<ShardBackendInfo> connection, final LocalHistoryIdentifier identifier) {
         final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
-        return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.get())
+        return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.orElseThrow())
              : new Remote(parent, connection, identifier);
     }
 
@@ -381,7 +381,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
             final AbstractClientConnection<ShardBackendInfo> connection,
             final LocalHistoryIdentifier identifier) {
         final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
-        return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.get())
+        return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.orElseThrow())
              : new RemoteSingle(parent, connection, identifier);
     }
 
index 824a2f9b31ebed9c0f02f69c366061002604db92..946e3341fd8f7778fdbf940299deb72112a61284 100644 (file)
@@ -49,7 +49,7 @@ import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,18 +64,14 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class is not safe to access from multiple application threads, as is usual for transactions. Its internal state
  * transitions based on backend responses are thread-safe.
- *
- * @author Robert Varga
  */
 final class RemoteProxyTransaction extends AbstractProxyTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteProxyTransaction.class);
 
-    // FIXME: make this tuneable
-    private static final int REQUEST_MAX_MODIFICATIONS = 1000;
-
     private final ModifyTransactionRequestBuilder builder;
     private final boolean sendReadyOnSeal;
     private final boolean snapshotOnly;
+    private final int maxModifications;
 
     private boolean builderBusy;
 
@@ -87,6 +83,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         this.snapshotOnly = snapshotOnly;
         this.sendReadyOnSeal = sendReadyOnSeal;
         builder = new ModifyTransactionRequestBuilder(identifier, localActor());
+        maxModifications = parent.parent().actorUtils().getDatastoreContext().getShardBatchedModificationCount();
     }
 
     @Override
@@ -169,7 +166,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
 
     private void sendModification(final TransactionRequest<?> request, final OptionalLong enqueuedTicks) {
         if (enqueuedTicks.isPresent()) {
-            enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.getAsLong());
+            enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.orElseThrow());
         } else {
             sendRequest(request, response -> completeModify(request, response));
         }
@@ -184,7 +181,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             ensureInitializedBuilder();
 
             builder.addModification(modification);
-            if (builder.size() >= REQUEST_MAX_MODIFICATIONS) {
+            if (builder.size() >= maxModifications) {
                 flushBuilder(enqueuedTicks);
             }
         } else {
@@ -205,8 +202,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
 
     private Exception recordFailedResponse(final Response<?, ?> response) {
         final Exception failure;
-        if (response instanceof RequestFailure) {
-            final RequestException cause = ((RequestFailure<?, ?>) response).getCause();
+        if (response instanceof RequestFailure<?, ?> requestFailure) {
+            final RequestException cause = requestFailure.getCause();
             failure = cause instanceof RequestTimeoutException
                     ? new DataStoreUnavailableException(cause.getMessage(), cause) : cause;
         } else {
@@ -230,8 +227,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             final Response<?, ?> response) {
         LOG.debug("Exists request for {} completed with {}", path, response);
 
-        if (response instanceof ExistsTransactionSuccess) {
-            future.set(((ExistsTransactionSuccess) response).getExists());
+        if (response instanceof ExistsTransactionSuccess success) {
+            future.set(success.getExists());
         } else {
             failReadFuture(future, "Error executing exists request for path " + path, response);
         }
@@ -243,8 +240,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             final Response<?, ?> response) {
         LOG.debug("Read request for {} completed with {}", path, response);
 
-        if (response instanceof ReadTransactionSuccess) {
-            future.set(((ReadTransactionSuccess) response).getData());
+        if (response instanceof ReadTransactionSuccess success) {
+            future.set(success.getData());
         } else {
             failReadFuture(future, "Error reading data for path " + path, response);
         }
@@ -303,19 +300,19 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     void handleForwardedRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
-        if (request instanceof ModifyTransactionRequest) {
-            handleForwardedModifyTransactionRequest(callback, (ModifyTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            handleForwardedModifyTransactionRequest(callback, modifyRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
             ensureFlushedBuider();
             sendRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                readRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     callback.accept(resp);
                 });
-        } else if (request instanceof ExistsTransactionRequest) {
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
             ensureFlushedBuider();
             sendRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                existsRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     callback.accept(resp);
                 });
@@ -336,7 +333,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback);
         } else {
-            throw new IllegalArgumentException("Unhandled request {}" + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -355,7 +352,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             }
 
             final TransactionRequest<?> tmp;
-            switch (maybeProto.get()) {
+            switch (maybeProto.orElseThrow()) {
                 case ABORT:
                     tmp = abortRequest();
                     sendRequest(tmp, resp -> {
@@ -385,7 +382,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
                     });
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
             }
         }
     }
@@ -399,12 +396,12 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     @Override
     void handleReplayedLocalRequest(final AbstractLocalTransactionRequest<?> request,
             final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
-        if (request instanceof CommitLocalTransactionRequest) {
-            replayLocalCommitRequest((CommitLocalTransactionRequest) request, callback, enqueuedTicks);
+        if (request instanceof CommitLocalTransactionRequest commitRequest) {
+            replayLocalCommitRequest(commitRequest, callback, enqueuedTicks);
         } else if (request instanceof AbortLocalTransactionRequest) {
             enqueueRequest(abortRequest(), callback, enqueuedTicks);
         } else {
-            throw new IllegalStateException("Unhandled request " + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -439,19 +436,19 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         final Consumer<Response<?, ?>> cb = callback != null ? callback : resp -> { /* NOOP */ };
         final OptionalLong optTicks = OptionalLong.of(enqueuedTicks);
 
-        if (request instanceof ModifyTransactionRequest) {
-            handleReplayedModifyTransactionRequest(enqueuedTicks, cb, (ModifyTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            handleReplayedModifyTransactionRequest(enqueuedTicks, cb, modifyRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                readRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     cb.accept(resp);
                 }, enqueuedTicks);
-        } else if (request instanceof ExistsTransactionRequest) {
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                existsRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     cb.accept(resp);
                 }, enqueuedTicks);
@@ -472,14 +469,13 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             enqueueDoAbort(callback, enqueuedTicks);
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback, enqueuedTicks);
-        } else if (request instanceof IncrementTransactionSequenceRequest) {
-            final IncrementTransactionSequenceRequest req = (IncrementTransactionSequenceRequest) request;
+        } else if (request instanceof IncrementTransactionSequenceRequest req) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new IncrementTransactionSequenceRequest(getIdentifier(), nextSequence(), localActor(),
                 snapshotOnly, req.getIncrement()), callback, enqueuedTicks);
             incrementSequence(req.getIncrement());
         } else {
-            throw new IllegalArgumentException("Unhandled request {}" + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -496,7 +492,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             }
 
             final TransactionRequest<?> tmp;
-            switch (maybeProto.get()) {
+            switch (maybeProto.orElseThrow()) {
                 case ABORT:
                     tmp = abortRequest();
                     enqueueRequest(tmp, resp -> {
@@ -526,7 +522,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
                     }, enqueuedTicks);
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
             }
         }
     }
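
In the RemoteProxyTransaction hunk the hard-coded REQUEST_MAX_MODIFICATIONS constant gives way to the shard-batched-modification-count taken from the datastore context. The underlying batching is simple: accumulate modifications in a builder and flush once the configured limit is reached. A generic sketch under those assumptions, with placeholder modification and sender types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    final class ModificationBatcher<M> {
        private final List<M> batch = new ArrayList<>();
        private final Consumer<List<M>> sender;
        private final int maxModifications;  // e.g. shard-batched-modification-count

        ModificationBatcher(final Consumer<List<M>> sender, final int maxModifications) {
            this.sender = sender;
            this.maxModifications = maxModifications;
        }

        void append(final M modification) {
            batch.add(modification);
            if (batch.size() >= maxModifications) {
                flush();
            }
        }

        void flush() {
            if (!batch.isEmpty()) {
                sender.accept(List.copyOf(batch));
                batch.clear();
            }
        }
    }
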
index 0958aade7156a304dbbb41fbac9755b2f8d79c34..95ce87ca9516036d4d4eca4c88aa76c8af540c0f 100644 (file)
@@ -17,7 +17,7 @@ import java.util.Optional;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.BackendInfo;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Combined backend tracking. Aside from usual {@link BackendInfo}, this object also tracks the cookie assigned
index aaaa88e8b6b2494e535ecab198f0f82c590f5ebd..984a4e4f0c81158fe90b404600e6ab8f9efec8ef 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -18,7 +19,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  */
 final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
     // Pre-boxed instance
-    private static final Long ZERO = Long.valueOf(0);
+    private static final Long ZERO = 0L;
 
     private SimpleDataStoreClientBehavior(final ClientActorContext context,
             final SimpleShardBackendResolver resolver) {
@@ -34,4 +35,9 @@ final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavio
     Long resolveShardForPath(final YangInstanceIdentifier path) {
         return ZERO;
     }
+
+    @Override
+    Stream<Long> resolveAllShards() {
+        return Stream.of(ZERO);
+    }
 }
index f9fffea025bbcdbe3952ae28bfbf084e12364129..6433b6b5878be65d11507dbfd14d25d73fbe7eb5 100644 (file)
@@ -7,8 +7,10 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.AbstractFuture;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -44,11 +46,10 @@ class VotingFuture<T> extends AbstractFuture<T> {
     private volatile int neededVotes;
 
     VotingFuture(final T result, final int requiredVotes) {
-        Preconditions.checkArgument(requiredVotes > 0);
+        this.result = requireNonNull(result);
+        checkArgument(requiredVotes > 0);
         this.neededVotes = requiredVotes;
 
-        // null is okay to allow Void type
-        this.result = result;
     }
 
     void voteYes() {
@@ -70,7 +71,7 @@ class VotingFuture<T> extends AbstractFuture<T> {
 
     private boolean castVote() {
         final int votes = VOTES_UPDATER.decrementAndGet(this);
-        Verify.verify(votes >= 0);
+        verify(votes >= 0);
         return votes == 0;
     }
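
For context on the VotingFuture hunk: the class counts down a fixed number of required votes and completes when the last vote arrives, which is why the verify(votes >= 0) guard stays after the atomic decrement. A stripped-down sketch of that counting logic using AtomicInteger rather than the AtomicIntegerFieldUpdater used by the real class:

    import java.util.concurrent.atomic.AtomicInteger;

    final class VoteCounter {
        private final AtomicInteger remainingVotes;

        VoteCounter(final int requiredVotes) {
            if (requiredVotes <= 0) {
                throw new IllegalArgumentException("requiredVotes must be positive");
            }
            remainingVotes = new AtomicInteger(requiredVotes);
        }

        // Returns true exactly once, for the vote that brings the count to zero.
        boolean castVote() {
            final int votes = remainingVotes.decrementAndGet();
            if (votes < 0) {
                throw new IllegalStateException("More votes cast than required");
            }
            return votes == 0;
        }
    }
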
 
index 70fef4197e3c4a8f4faa71e59753a57ef70708cc..05edad2cf1f72867110f5a3798a6570365408682 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
@@ -20,11 +19,12 @@ import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collection;
-import java.util.Set;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
@@ -36,18 +36,16 @@ import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardM
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.Duration;
@@ -55,13 +53,12 @@ import scala.concurrent.duration.Duration;
 /**
  * Base implementation of a distributed DOMStore.
  */
-public abstract class AbstractDataStore implements DistributedDataStoreInterface, EffectiveModelContextListener,
-        DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher,
-        DOMDataTreeCommitCohortRegistry, AutoCloseable {
-
+public abstract class AbstractDataStore implements DistributedDataStoreInterface,
+        DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher, CommitCohortExtension,
+        AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
 
-    private final SettableFuture<Void> readinessFuture = SettableFuture.create();
+    private final SettableFuture<Empty> readinessFuture = SettableFuture.create();
     private final ClientIdentifier identifier;
     private final DataStoreClient client;
     private final ActorUtils actorUtils;
@@ -71,6 +68,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
 
     @SuppressWarnings("checkstyle:IllegalCatch")
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Testing overrides")
     protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
             final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
             final DatastoreSnapshot restoreFromSnapshot) {
@@ -109,7 +107,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
             LOG.error("Failed to get actor for {}", clientProps, e);
             clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
             Throwables.throwIfUnchecked(e);
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
 
         identifier = client.getIdentifier();
@@ -125,21 +123,15 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         datastoreInfoMXBean.registerMBean();
     }
 
-    @VisibleForTesting
-    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
-        this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
-        this.client = null;
-        this.identifier = requireNonNull(identifier);
-    }
-
     @VisibleForTesting
     protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
                                 final DataStoreClient clientActor) {
         this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
-        this.client = clientActor;
+        client = clientActor;
         this.identifier = requireNonNull(identifier);
     }
 
+    @VisibleForTesting
     protected AbstractShardManagerCreator<?> getShardManagerCreator() {
         return new ShardManagerCreator();
     }
@@ -148,17 +140,18 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         return client;
     }
 
-    final ClientIdentifier getIdentifier() {
-        return identifier;
-    }
-
     public void setCloseable(final AutoCloseable closeable) {
         this.closeable = closeable;
     }
 
     @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener) {
+    public final Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return registerTreeChangeListener(treeId, listener, true);
+    }
+
+    private @NonNull Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener, final boolean clustered) {
         requireNonNull(treeId, "treeId should not be null");
         requireNonNull(listener, "listener should not be null");
 
@@ -169,47 +162,51 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         if (treeId.isEmpty()) {
             // User is targeting root of the datastore. If there is more than one shard, we have to register with them
             // all and perform data composition.
-            final Set<String> shardNames = actorUtils.getConfiguration().getAllShardNames();
+            final var shardNames = actorUtils.getConfiguration().getAllShardNames();
             if (shardNames.size() > 1) {
-                checkArgument(listener instanceof ClusteredDOMDataTreeChangeListener,
-                    "Cannot listen on root without non-clustered listener %s", listener);
+                if (!clustered) {
+                    throw new IllegalArgumentException(
+                        "Cannot listen on root without non-clustered listener " + listener);
+                }
                 return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames);
             }
         }
 
-        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        final var shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
         LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
 
-        final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId);
-        listenerRegistrationProxy.init(shardName);
+        return DataTreeChangeListenerProxy.of(actorUtils, listener, treeId, clustered, shardName);
+    }
 
-        return listenerRegistrationProxy;
+    @Override
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    public final Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return registerTreeChangeListener(treeId, listener, false);
     }
 
     @Override
-    public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
-            final DOMDataTreeIdentifier subtree, final C cohort) {
-        YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier();
+    // Non-final for testing
+    public Registration registerCommitCohort(final DOMDataTreeIdentifier subtree,
+            final DOMDataTreeCommitCohort cohort) {
+        YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").path();
         requireNonNull(cohort, "listener should not be null");
 
 
         final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
         LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
 
-        DataTreeCohortRegistrationProxy<C> cohortProxy =
-                new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
+        final var cohortProxy = new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
         cohortProxy.init(shardName);
         return cohortProxy;
     }
 
-    @Override
     public void onModelContextUpdated(final EffectiveModelContext newModelContext) {
         actorUtils.setSchemaContext(newModelContext);
     }
 
     @Override
-    public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
+    public final void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
         LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName());
 
         actorUtils.setDatastoreContext(contextFactory);
@@ -218,7 +215,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
+    public final void close() {
         LOG.info("Closing data store {}", identifier);
 
         if (datastoreConfigMXBean != null) {
@@ -244,13 +241,13 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     }
 
     @Override
-    public ActorUtils getActorUtils() {
+    public final ActorUtils getActorUtils() {
         return actorUtils;
     }
 
     // TODO: consider removing this in favor of awaitReadiness()
     @Deprecated
-    public void waitTillReady() {
+    public final void waitTillReady() {
         LOG.info("Beginning to wait for data store to become ready : {}", identifier);
 
         final Duration toWait = initialSettleTime();
@@ -269,13 +266,13 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
     @Beta
     @Deprecated
-    public boolean awaitReadiness() throws InterruptedException {
+    public final boolean awaitReadiness() throws InterruptedException {
         return awaitReadiness(initialSettleTime());
     }
 
     @Beta
     @Deprecated
-    public boolean awaitReadiness(final Duration toWait) throws InterruptedException {
+    public final boolean awaitReadiness(final Duration toWait) throws InterruptedException {
         try {
             if (toWait.isFinite()) {
                 try {
@@ -296,7 +293,8 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
     @Beta
     @Deprecated
-    public void awaitReadiness(final long timeout, final TimeUnit unit) throws InterruptedException, TimeoutException {
+    public final void awaitReadiness(final long timeout, final TimeUnit unit)
+            throws InterruptedException, TimeoutException {
         if (!awaitReadiness(Duration.create(timeout, unit))) {
             throw new TimeoutException("Shard leaders failed to settle");
         }
@@ -332,41 +330,32 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     }
 
     @VisibleForTesting
-    SettableFuture<Void> readinessFuture() {
+    public final SettableFuture<Empty> readinessFuture() {
         return readinessFuture;
     }
 
     @Override
-    @SuppressWarnings("unchecked")
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
-            final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
-            final DOMDataTreeChangeListener delegate) {
-
+    public final Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+            final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
         requireNonNull(shardLookup, "shardLookup should not be null");
         requireNonNull(insideShard, "insideShard should not be null");
         requireNonNull(delegate, "delegate should not be null");
 
-        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
-        LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
-                delegate,shardLookup, shardName, insideShard);
-
-        // wrap this in the ClusteredDOMDataTreeChangeLister interface
-        // since we always want clustered registration
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, new ClusteredDOMDataTreeChangeListener() {
-                    @Override
-                    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
-                        delegate.onDataTreeChanged(changes);
-                    }
-
-                    @Override
-                    public void onInitialData() {
-                        delegate.onInitialData();
-                    }
-                }, insideShard);
-        listenerRegistrationProxy.init(shardName);
-
-        return (ListenerRegistration<L>) listenerRegistrationProxy;
+        final var shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}", delegate, shardLookup,
+            shardName, insideShard);
+
+        return DataTreeChangeListenerProxy.of(actorUtils, new DOMDataTreeChangeListener() {
+            @Override
+            public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
+                delegate.onDataTreeChanged(changes);
+            }
+
+            @Override
+            public void onInitialData() {
+                delegate.onInitialData();
+            }
+        }, insideShard, true, shardName);
     }
 
     private Duration initialSettleTime() {
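
The AbstractDataStore hunk folds the former ClusteredDOMDataTreeChangeListener instanceof check into a boolean clustered flag: the public registerTreeChangeListener() delegates with clustered set to true, the deprecated legacy variant passes false, and a root-of-datastore registration spanning multiple shards is rejected for non-clustered listeners. A compact sketch of that delegation shape, with types simplified to JDK interfaces and all names illustrative:

    import java.util.Set;

    final class ListenerRegistry {
        private final Set<String> shardNames;

        ListenerRegistry(final Set<String> shardNames) {
            this.shardNames = Set.copyOf(shardNames);
        }

        AutoCloseable registerTreeChangeListener(final String treeId, final Runnable listener) {
            return register(treeId, listener, true);
        }

        @Deprecated
        AutoCloseable registerLegacyTreeChangeListener(final String treeId, final Runnable listener) {
            return register(treeId, listener, false);
        }

        private AutoCloseable register(final String treeId, final Runnable listener, final boolean clustered) {
            if (treeId.isEmpty() && shardNames.size() > 1 && !clustered) {
                throw new IllegalArgumentException("Cannot listen on root with non-clustered listener " + listener);
            }
            // Resolve the target shard and register; closing the registration undoes it.
            return () -> { };
        }
    }
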
index 4aa075f3d3fb492a6be54fff46091190703a73ef..b44bf384323ef6cd5add17fe58d2258a731b169b 100644 (file)
@@ -12,9 +12,10 @@ import java.util.Map;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreContextIntrospectorFactory {
     @Override
@@ -35,11 +36,12 @@ abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreC
 
     @VisibleForTesting
     final @NonNull DatastoreContextIntrospector newInstance(final DatastoreContext context) {
-        final DataStorePropertiesContainer defaultPropsContainer = (DataStorePropertiesContainer)
-                serializer().fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME),
-                    ImmutableNodes.containerNode(DataStorePropertiesContainer.QNAME)).getValue();
-
-        return new DatastoreContextIntrospector(context, defaultPropsContainer);
+        return new DatastoreContextIntrospector(context, (DataStorePropertiesContainer) serializer()
+            .fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME),
+                ImmutableNodes.newContainerBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(DataStorePropertiesContainer.QNAME))
+                .build())
+            .getValue());
     }
 
     abstract BindingNormalizedNodeSerializer serializer();
index 022bb7aa07e2016670d9fc4fd36e006116655322..d00db5757eb342801e8a49c28fd5d513f714398c 100644 (file)
@@ -37,7 +37,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,10 +79,10 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
 
     final @Nullable TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
             final RequestEnvelope envelope, final long now) throws RequestException {
-        if (request instanceof TransactionPurgeRequest) {
-            return handleTransactionPurgeRequest((TransactionPurgeRequest) request, envelope, now);
-        } else if (request instanceof SkipTransactionsRequest) {
-            return handleSkipTransactionsRequest((SkipTransactionsRequest) request, envelope, now);
+        if (request instanceof TransactionPurgeRequest purgeRequest) {
+            return handleTransactionPurgeRequest(purgeRequest, envelope, now);
+        } else if (request instanceof SkipTransactionsRequest skipRequest) {
+            return handleSkipTransactionsRequest(skipRequest, envelope, now);
         }
 
         final TransactionIdentifier id = request.getTarget();
@@ -113,7 +113,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
         } else if (!(request instanceof IncrementTransactionSequenceRequest)) {
             final Optional<TransactionSuccess<?>> maybeReplay = tx.replaySequence(request.getSequence());
             if (maybeReplay.isPresent()) {
-                final TransactionSuccess<?> replay = maybeReplay.get();
+                final TransactionSuccess<?> replay = maybeReplay.orElseThrow();
                 LOG.debug("{}: envelope {} replaying response {}", persistenceId(), envelope, replay);
                 return replay;
             }
@@ -170,7 +170,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
     }
 
     private SkipTransactionsResponse handleSkipTransactionsRequest(final SkipTransactionsRequest request,
-            final RequestEnvelope envelope, final long now) throws RequestException {
+            final RequestEnvelope envelope, final long now) {
         final var first = request.getTarget();
         final var others = request.getOthers();
         final var ids = new ArrayList<UnsignedLong>(others.size() + 1);
@@ -224,13 +224,12 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
     }
 
     private FrontendTransaction createTransaction(final TransactionRequest<?> request, final TransactionIdentifier id) {
-        if (request instanceof CommitLocalTransactionRequest) {
+        if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
             LOG.debug("{}: allocating new ready transaction {}", persistenceId(), id);
             tree.getStats().incrementReadWriteTransactionCount();
-            return createReadyTransaction(id, ((CommitLocalTransactionRequest) request).getModification());
+            return createReadyTransaction(id, commitLocalRequest.getModification());
         }
-        if (request instanceof AbstractReadTransactionRequest
-                && ((AbstractReadTransactionRequest<?>) request).isSnapshotOnly()) {
+        if (request instanceof AbstractReadTransactionRequest<?> readTxRequest && readTxRequest.isSnapshotOnly()) {
             LOG.debug("{}: allocating new open snapshot {}", persistenceId(), id);
             tree.getStats().incrementReadOnlyTransactionCount();
             return createOpenSnapshot(id);
index 3d1bebeb921c7ab096059ca5babb39afb4dfc273..b4c65a80c07f672172d878b8d2eee1aefee72f34 100644 (file)
@@ -12,7 +12,7 @@ import akka.actor.ActorRef;
 import akka.actor.Props;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
index bec1a38f2453790d6a80e786e1be7d3dfa6c560b..e559ff12498be15eb9f85d4be8ce3adfec5064bf 100644 (file)
@@ -14,7 +14,7 @@ import com.google.common.base.MoreObjects;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 /**
  * Abstract base for transactions running on ShardDataTree. This class is NOT thread-safe.
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index 7ef1cd4..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.List;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import scala.concurrent.Future;
-
-/**
- * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
- * implementation. In addition to the usual set of methods it also contains the list of actor
- * futures.
- */
-public abstract class AbstractThreePhaseCommitCohort<T> implements DOMStoreThreePhaseCommitCohort {
-    protected static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
-    protected static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
-
-    abstract List<Future<T>> getCohortFutures();
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java
deleted file mode 100644 (file)
index 6941d19..0000000
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicLongFieldUpdater;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.util.Try;
-
-/**
- * Factory for creating local and remote TransactionContext instances. Maintains a cache of known local
- * transaction factories.
- */
-abstract class AbstractTransactionContextFactory<F extends LocalTransactionFactory> implements AutoCloseable {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContextFactory.class);
-    @SuppressWarnings("rawtypes")
-    private static final AtomicLongFieldUpdater<AbstractTransactionContextFactory> TX_COUNTER_UPDATER =
-            AtomicLongFieldUpdater.newUpdater(AbstractTransactionContextFactory.class, "nextTx");
-
-    private final ConcurrentMap<String, F> knownLocal = new ConcurrentHashMap<>();
-    private final @NonNull LocalHistoryIdentifier historyId;
-    private final @NonNull ActorUtils actorUtils;
-
-    // Used via TX_COUNTER_UPDATER
-    @SuppressWarnings("unused")
-    private volatile long nextTx;
-
-    protected AbstractTransactionContextFactory(final ActorUtils actorUtils, final LocalHistoryIdentifier historyId) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.historyId = requireNonNull(historyId);
-    }
-
-    final ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    final LocalHistoryIdentifier getHistoryId() {
-        return historyId;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private TransactionContext maybeCreateLocalTransactionContext(final TransactionProxy parent,
-            final String shardName) {
-        final LocalTransactionFactory local = knownLocal.get(shardName);
-        if (local != null) {
-            LOG.debug("Tx {} - Creating local component for shard {} using factory {}", parent.getIdentifier(),
-                shardName, local);
-
-            try {
-                return createLocalTransactionContext(local, parent);
-            } catch (Exception e) {
-                return new NoOpTransactionContext(e, parent.getIdentifier());
-            }
-        }
-
-        return null;
-    }
-
-    private AbstractTransactionContextWrapper maybeCreateDirectTransactionContextWrapper(
-            final PrimaryShardInfo primaryShardInfo, final TransactionProxy parent,
-            final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) {
-        LOG.debug("Tx {}: Found primary {} for shard {}, trying to use DirectTransactionContextWrapper",
-                parent.getIdentifier(), primaryShardInfo.getPrimaryShardActor(), shardName);
-
-        updateShardInfo(shardName, primaryShardInfo);
-
-        final TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName);
-        try {
-            if (localContext != null) {
-                LOG.debug("Tx {}: Local transaction context created successfully, using DirectTransactionWrapper",
-                        parent.getIdentifier());
-                return new DirectTransactionContextWrapper(parent.getIdentifier(), actorUtils, shardName,
-                        localContext);
-            }
-
-            LOG.debug("Tx {}: Local transaction context creation failed, using DelayedTransactionWrapper",
-                parent.getIdentifier());
-            final RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport(
-                transactionContextWrapper, parent, shardName);
-            remote.setPrimaryShard(primaryShardInfo);
-            return transactionContextWrapper;
-        } finally {
-            onTransactionContextCreated(parent.getIdentifier());
-        }
-    }
-
-    private void onFindPrimaryShardSuccess(final PrimaryShardInfo primaryShardInfo, final TransactionProxy parent,
-            final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) {
-        LOG.debug("Tx {}: Found primary {} for shard {}", parent.getIdentifier(),
-                primaryShardInfo.getPrimaryShardActor(), shardName);
-
-        updateShardInfo(shardName, primaryShardInfo);
-
-        final TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName);
-        try {
-            if (localContext != null) {
-                transactionContextWrapper.executePriorTransactionOperations(localContext);
-            } else {
-                final RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport(
-                        transactionContextWrapper, parent, shardName);
-                remote.setPrimaryShard(primaryShardInfo);
-            }
-        } finally {
-            onTransactionContextCreated(parent.getIdentifier());
-        }
-    }
-
-    private void onFindPrimaryShardFailure(final Throwable failure, final TransactionProxy parent,
-            final String shardName, final DelayedTransactionContextWrapper transactionContextWrapper) {
-        LOG.debug("Tx {}: Find primary for shard {} failed", parent.getIdentifier(), shardName, failure);
-
-        try {
-            transactionContextWrapper.executePriorTransactionOperations(
-                    new NoOpTransactionContext(failure, parent.getIdentifier()));
-        } finally {
-            onTransactionContextCreated(parent.getIdentifier());
-        }
-    }
-
-    final AbstractTransactionContextWrapper newTransactionContextWrapper(final TransactionProxy parent,
-            final String shardName) {
-        final DelayedTransactionContextWrapper contextWrapper = new DelayedTransactionContextWrapper(
-                parent.getIdentifier(), actorUtils, shardName);
-        final Future<PrimaryShardInfo> findPrimaryFuture = findPrimaryShard(shardName, parent.getIdentifier());
-        if (findPrimaryFuture.isCompleted()) {
-            final Try<PrimaryShardInfo> maybe = findPrimaryFuture.value().get();
-            if (maybe.isSuccess()) {
-                return maybeCreateDirectTransactionContextWrapper(maybe.get(), parent, shardName, contextWrapper);
-            }
-
-            onFindPrimaryShardFailure(maybe.failed().get(), parent, shardName, contextWrapper);
-        } else {
-            findPrimaryFuture.onComplete(result -> {
-                if (result.isSuccess()) {
-                    onFindPrimaryShardSuccess(result.get(), parent, shardName, contextWrapper);
-                } else {
-                    onFindPrimaryShardFailure(result.failed().get(), parent, shardName, contextWrapper);
-                }
-                return null;
-            }, actorUtils.getClientDispatcher());
-        }
-        return contextWrapper;
-    }
-
-    private void updateShardInfo(final String shardName, final PrimaryShardInfo primaryShardInfo) {
-        final Optional<ReadOnlyDataTree> maybeDataTree = primaryShardInfo.getLocalShardDataTree();
-        if (maybeDataTree.isPresent()) {
-            if (!knownLocal.containsKey(shardName)) {
-                LOG.debug("Shard {} resolved to local data tree - adding local factory", shardName);
-
-                F factory = factoryForShard(shardName, primaryShardInfo.getPrimaryShardActor(), maybeDataTree.get());
-                knownLocal.putIfAbsent(shardName, factory);
-            }
-        } else if (knownLocal.containsKey(shardName)) {
-            LOG.debug("Shard {} invalidating local data tree", shardName);
-
-            knownLocal.remove(shardName);
-        }
-    }
-
-    protected final MemberName getMemberName() {
-        return historyId.getClientId().getFrontendId().getMemberName();
-    }
-
-    /**
-     * Create an identifier for the next TransactionProxy attached to this component
-     * factory.
-     * @return Transaction identifier, may not be null.
-     */
-    protected final TransactionIdentifier nextIdentifier() {
-        return new TransactionIdentifier(historyId, TX_COUNTER_UPDATER.getAndIncrement(this));
-    }
-
-    /**
-     * Find the primary shard actor.
-     *
-     * @param shardName Shard name
-     * @return Future containing shard information.
-     */
-    protected abstract Future<PrimaryShardInfo> findPrimaryShard(@NonNull String shardName,
-            @NonNull TransactionIdentifier txId);
-
-    /**
-     * Create local transaction factory for specified shard, backed by specified shard leader
-     * and data tree instance.
-     *
-     * @param shardName the shard name
-     * @param shardLeader the shard leader
-     * @param dataTree Backing data tree instance. The data tree may only be accessed in
-     *                 read-only manner.
-     * @return Transaction factory for local use.
-     */
-    protected abstract F factoryForShard(String shardName, ActorSelection shardLeader, ReadOnlyDataTree dataTree);
-
-    /**
-     * Callback invoked from child transactions to push any futures, which need to
-     * be waited for before the next transaction is allocated.
-     * @param cohortFutures Collection of futures
-     */
-    protected abstract <T> void onTransactionReady(@NonNull TransactionIdentifier transaction,
-            @NonNull Collection<Future<T>> cohortFutures);
-
-    /**
-     * Callback invoked when the internal TransactionContext has been created for a transaction.
-     *
-     * @param transactionId the ID of the transaction.
-     */
-    protected abstract void onTransactionContextCreated(@NonNull TransactionIdentifier transactionId);
-
-    private static TransactionContext createLocalTransactionContext(final LocalTransactionFactory factory,
-                                                                    final TransactionProxy parent) {
-
-        switch (parent.getType()) {
-            case READ_ONLY:
-                final DOMStoreReadTransaction readOnly = factory.newReadOnlyTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(readOnly, parent.getIdentifier(), factory) {
-                    @Override
-                    DOMStoreWriteTransaction getWriteDelegate() {
-                        throw new UnsupportedOperationException();
-                    }
-
-                    @Override
-                    DOMStoreReadTransaction getReadDelegate() {
-                        return readOnly;
-                    }
-                };
-            case READ_WRITE:
-                final DOMStoreReadWriteTransaction readWrite = factory.newReadWriteTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(readWrite, parent.getIdentifier(), factory) {
-                    @Override
-                    DOMStoreWriteTransaction getWriteDelegate() {
-                        return readWrite;
-                    }
-
-                    @Override
-                    DOMStoreReadTransaction getReadDelegate() {
-                        return readWrite;
-                    }
-                };
-            case WRITE_ONLY:
-                final DOMStoreWriteTransaction writeOnly = factory.newWriteOnlyTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(writeOnly, parent.getIdentifier(), factory) {
-                    @Override
-                    DOMStoreWriteTransaction getWriteDelegate() {
-                        return writeOnly;
-                    }
-
-                    @Override
-                    DOMStoreReadTransaction getReadDelegate() {
-                        throw new UnsupportedOperationException();
-                    }
-                };
-            default:
-                throw new IllegalArgumentException("Invalid transaction type: " + parent.getType());
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextWrapper.java
deleted file mode 100644
index 49dac87..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.TimeUnit;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import scala.concurrent.Future;
-
-/**
- * A helper class that wraps an eventual TransactionContext instance. We have two specializations:
- * <ul>
- *   <li>{@link DelayedTransactionContextWrapper}, which enqueues operations towards the backend</li>
- *   <li>{@link DirectTransactionContextWrapper}, which sends operations to the backend</li>
- * </ul>
- */
-abstract class AbstractTransactionContextWrapper {
-    private final TransactionIdentifier identifier;
-    private final OperationLimiter limiter;
-    private final String shardName;
-
-    AbstractTransactionContextWrapper(@NonNull final TransactionIdentifier identifier,
-                                      @NonNull final ActorUtils actorUtils, @NonNull final String shardName) {
-        this.identifier = requireNonNull(identifier);
-        this.shardName = requireNonNull(shardName);
-        limiter = new OperationLimiter(identifier,
-            // 1 extra permit for the ready operation
-            actorUtils.getDatastoreContext().getShardBatchedModificationCount() + 1,
-            TimeUnit.MILLISECONDS.toSeconds(actorUtils.getDatastoreContext().getOperationTimeoutInMillis()));
-    }
-
-    final TransactionIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    final OperationLimiter getLimiter() {
-        return limiter;
-    }
-
-    final String getShardName() {
-        return shardName;
-    }
-
-    abstract @Nullable TransactionContext getTransactionContext();
-
-    /**
-     * Either enqueue or execute specified operation.
-     *
-     * @param op Operation to (eventually) execute
-     */
-    abstract void maybeExecuteTransactionOperation(TransactionOperation op);
-
-    /**
-     * Mark the transaction as ready.
-     *
-     * @param participatingShardNames Shards which participate on the transaction
-     * @return Future indicating the transaction has been readied on the backend
-     */
-    abstract @NonNull Future<ActorSelection> readyTransaction(Optional<SortedSet<String>> participatingShardNames);
-}
index 1f87fd4259e1ae0e90bade6c153672eedf4ed6b4..e3bb074bdc6fe7cada938e48b8b88281c8e59935 100644
@@ -14,9 +14,10 @@ import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,12 +53,12 @@ final class ChainedCommitCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public TransactionIdentifier getIdentifier() {
-        return delegate.getIdentifier();
+    TransactionIdentifier transactionId() {
+        return delegate.transactionId();
     }
 
     @Override
-    public void canCommit(final FutureCallback<Void> callback) {
+    public void canCommit(final FutureCallback<Empty> callback) {
         delegate.canCommit(callback);
     }
 
@@ -67,7 +68,7 @@ final class ChainedCommitCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public void abort(final FutureCallback<Void> callback) {
+    public void abort(final FutureCallback<Empty> callback) {
         delegate.abort(callback);
     }
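
The ChainedCommitCohort hunks above move the canCommit() and abort() callbacks from FutureCallback<Void> to FutureCallback<Empty>. A minimal sketch of a callback written against the new signature, with a class name of our own choosing, might look like this:

    import com.google.common.util.concurrent.FutureCallback;
    import org.opendaylight.yangtools.yang.common.Empty;

    // Sketch only: a logging callback compatible with the Empty-based signatures above.
    final class LoggingCohortCallback implements FutureCallback<Empty> {
        @Override
        public void onSuccess(final Empty result) {
            // The cohort reports completion with the Empty.value() singleton instead of a null Void.
            System.out.println("cohort phase completed");
        }

        @Override
        public void onFailure(final Throwable cause) {
            System.err.println("cohort phase failed: " + cause);
        }
    }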
 
index 6c0c13b3abfd6dec5ca89b1014f095e4e87c468a..120b004a6e9bf44513acc784df5efe619232abe0 100644
@@ -13,15 +13,18 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.ActorRef;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.List;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortDecorator;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 final class CohortEntry {
     private final ReadWriteShardDataTreeTransaction transaction;
     private final TransactionIdentifier transactionId;
@@ -36,16 +39,16 @@ final class CohortEntry {
     private Shard shard;
 
     private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) {
-        this.cohort = null;
+        cohort = null;
         this.transaction = requireNonNull(transaction);
-        this.transactionId = transaction.getIdentifier();
+        transactionId = transaction.getIdentifier();
         this.clientVersion = clientVersion;
     }
 
     private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) {
         this.cohort = requireNonNull(cohort);
-        this.transactionId = cohort.getIdentifier();
-        this.transaction = null;
+        transactionId = cohort.transactionId();
+        transaction = null;
         this.clientVersion = clientVersion;
     }
 
@@ -90,6 +93,7 @@ final class CohortEntry {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Re-thrown")
     void applyModifications(final List<Modification> modifications) {
         totalBatchedModificationsReceived++;
         if (lastBatchedModificationsException == null) {
@@ -105,7 +109,7 @@ final class CohortEntry {
         }
     }
 
-    void canCommit(final FutureCallback<Void> callback) {
+    void canCommit(final FutureCallback<Empty> callback) {
         cohort.canCommit(callback);
     }
 
@@ -117,7 +121,7 @@ final class CohortEntry {
         cohort.commit(callback);
     }
 
-    void abort(final FutureCallback<Void> callback) {
+    void abort(final FutureCallback<Empty> callback) {
         cohort.abort(callback);
     }
 
index bca00ebc4a6bdbb88cd928ffe609bdb6ae1acd5f..078b45f68f9e98cdcef1cd48b190fbedddd1a065 100644
@@ -20,11 +20,9 @@ import akka.dispatch.Recover;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
 import com.google.common.collect.Lists;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.AbstractMap.SimpleImmutableEntry;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
@@ -36,8 +34,9 @@ import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.CanCommit;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.Success;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.compat.java8.FutureConverters;
@@ -48,7 +47,6 @@ import scala.concurrent.Future;
  * <p/>
  * It tracks current operation and list of cohorts which successfuly finished previous phase in
  * case, if abort is necessary to invoke it only on cohort steps which are still active.
- *
  */
 class CompositeDataTreeCohort {
     private static final Logger LOG = LoggerFactory.getLogger(CompositeDataTreeCohort.class);
@@ -92,7 +90,7 @@ class CompositeDataTreeCohort {
         ABORTED
     }
 
-    static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<Object>() {
+    static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<>() {
         @Override
         public Failure recover(final Throwable error) {
             return new Failure(error);
@@ -101,17 +99,17 @@ class CompositeDataTreeCohort {
 
     private final DataTreeCohortActorRegistry registry;
     private final TransactionIdentifier txId;
-    private final SchemaContext schema;
+    private final EffectiveModelContext schema;
     private final Executor callbackExecutor;
     private final Timeout timeout;
 
-    private @NonNull List<Success> successfulFromPrevious = Collections.emptyList();
+    private @NonNull List<Success> successfulFromPrevious = List.of();
     private State state = State.IDLE;
 
     CompositeDataTreeCohort(final DataTreeCohortActorRegistry registry, final TransactionIdentifier transactionID,
-        final SchemaContext schema, final Executor callbackExecutor, final Timeout timeout) {
+        final EffectiveModelContext schema, final Executor callbackExecutor, final Timeout timeout) {
         this.registry = requireNonNull(registry);
-        this.txId = requireNonNull(transactionID);
+        txId = requireNonNull(transactionID);
         this.schema = requireNonNull(schema);
         this.callbackExecutor = requireNonNull(callbackExecutor);
         this.timeout = requireNonNull(timeout);
@@ -135,11 +133,11 @@ class CompositeDataTreeCohort {
                 throw new IllegalStateException("Unhandled state " + state);
         }
 
-        successfulFromPrevious = Collections.emptyList();
+        successfulFromPrevious = List.of();
         state = State.IDLE;
     }
 
-    Optional<CompletionStage<Void>> canCommit(final DataTreeCandidate tip) {
+    Optional<CompletionStage<Empty>> canCommit(final DataTreeCandidate tip) {
         if (LOG.isTraceEnabled()) {
             LOG.trace("{}: canCommit - candidate: {}", txId, tip);
         } else {
@@ -149,7 +147,7 @@ class CompositeDataTreeCohort {
         final List<CanCommit> messages = registry.createCanCommitMessages(txId, tip, schema);
         LOG.debug("{}: canCommit - messages: {}", txId, messages);
         if (messages.isEmpty()) {
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             changeStateFrom(State.IDLE, State.CAN_COMMIT_SUCCESSFUL);
             return Optional.empty();
         }
@@ -167,7 +165,7 @@ class CompositeDataTreeCohort {
         return Optional.of(processResponses(futures, State.CAN_COMMIT_SENT, State.CAN_COMMIT_SUCCESSFUL));
     }
 
-    Optional<CompletionStage<Void>> preCommit() {
+    Optional<CompletionStage<Empty>> preCommit() {
         LOG.debug("{}: preCommit - successfulFromPrevious: {}", txId, successfulFromPrevious);
 
         if (successfulFromPrevious.isEmpty()) {
@@ -181,7 +179,7 @@ class CompositeDataTreeCohort {
         return Optional.of(processResponses(futures, State.PRE_COMMIT_SENT, State.PRE_COMMIT_SUCCESSFUL));
     }
 
-    Optional<CompletionStage<Void>> commit() {
+    Optional<CompletionStage<Empty>> commit() {
         LOG.debug("{}: commit - successfulFromPrevious: {}", txId, successfulFromPrevious);
         if (successfulFromPrevious.isEmpty()) {
             changeStateFrom(State.PRE_COMMIT_SUCCESSFUL, State.COMMITED);
@@ -222,10 +220,10 @@ class CompositeDataTreeCohort {
         return ret;
     }
 
-    private @NonNull CompletionStage<Void> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
+    private @NonNull CompletionStage<Empty> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
             final State currentState, final State afterState) {
         LOG.debug("{}: processResponses - currentState: {}, afterState: {}", txId, currentState, afterState);
-        final CompletableFuture<Void> returnFuture = new CompletableFuture<>();
+        final CompletableFuture<Empty> returnFuture = new CompletableFuture<>();
         Future<Iterable<Object>> aggregateFuture = Futures.sequence(Lists.transform(futures, Entry::getValue),
                 ExecutionContexts.global());
 
@@ -240,14 +238,10 @@ class CompositeDataTreeCohort {
         return returnFuture;
     }
 
-    // FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the
-    // generic type is Void.
-    @SuppressFBWarnings(value = { "NP_NONNULL_PARAM_VIOLATION", "UPM_UNCALLED_PRIVATE_METHOD" },
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void processResponses(final Throwable failure, final Iterable<Object> results,
-            final State currentState, final State afterState, final CompletableFuture<Void> resultFuture) {
+            final State currentState, final State afterState, final CompletableFuture<Empty> resultFuture) {
         if (failure != null) {
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             resultFuture.completeExceptionally(failure);
             return;
         }
@@ -274,12 +268,12 @@ class CompositeDataTreeCohort {
                 firstEx.addSuppressed(it.next().cause());
             }
 
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             resultFuture.completeExceptionally(firstEx);
         } else {
             successfulFromPrevious = successful;
             changeStateFrom(currentState, afterState);
-            resultFuture.complete(null);
+            resultFuture.complete(Empty.value());
         }
     }
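
Switching CompositeDataTreeCohort to CompletableFuture<Empty> also removes the SpotBugs suppression that was only needed because a Void-typed future has to be completed with null. A small sketch of the difference, with class and variable names of our own:

    import java.util.concurrent.CompletableFuture;
    import org.opendaylight.yangtools.yang.common.Empty;

    final class EmptyVersusVoid {
        static void demo() {
            // Completing a Void future requires null, which SpotBugs reports as
            // NP_NONNULL_PARAM_VIOLATION and previously forced a suppression.
            CompletableFuture<Void> voidFuture = new CompletableFuture<>();
            voidFuture.complete(null);

            // An Empty future completes with a real singleton value instead.
            CompletableFuture<Empty> emptyFuture = new CompletableFuture<>();
            emptyFuture.complete(Empty.value());
        }
    }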
 
index a8a0124fcd35f71d6c8e549ff0ee6a004979f5bc..88877bc36a8e97b90711d2aeb5e89b2356c387c8 100644
@@ -12,7 +12,7 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.base.MoreObjects;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
 
 final class DOMDataTreeCandidateTO implements DOMDataTreeCandidate {
 
index eb444f2d8f096c64871ee59d3bce8f1fb689fb81..996fe1023dac6f21b4ae6a6c0f4bd68059694635 100644
@@ -27,9 +27,12 @@ public final class DataStoreVersions {
     public static final short FLUORINE_VERSION    =  9;
     @Deprecated
     public static final short NEON_SR2_VERSION    = 10;
+    @Deprecated
     public static final short SODIUM_SR1_VERSION  = 11;
+    @Deprecated
     public static final short PHOSPHORUS_VERSION  = 12;
-    public static final short CURRENT_VERSION     = PHOSPHORUS_VERSION;
+    public static final short POTASSIUM_VERSION   = 13;
+    public static final short CURRENT_VERSION     = POTASSIUM_VERSION;
 
     private DataStoreVersions() {
 
index 9384a84e382aceaed952efd2682d446b22f588ba..6f88d3ea986136416276fa39915c5bfe2673bee6 100644
@@ -60,9 +60,9 @@ class DataTreeChangeListenerActor extends AbstractUntypedActor {
         LOG.debug("{}: Notifying onInitialData to listener {}", logContext, listener);
 
         try {
-            this.listener.onInitialData();
+            listener.onInitialData();
         } catch (Exception e) {
-            LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+            LOG.error("{}: Error notifying listener {}", logContext, listener, e);
         }
     }
 
@@ -75,15 +75,21 @@ class DataTreeChangeListenerActor extends AbstractUntypedActor {
             return;
         }
 
-        LOG.debug("{}: Sending {} change notification(s) {} to listener {}", logContext, message.getChanges().size(),
-                message.getChanges(), listener);
+        final var changes = message.getChanges();
+        LOG.debug("{}: Sending {} change notification(s) to listener {}", logContext, changes.size(), listener);
+        if (LOG.isTraceEnabled() && !changes.isEmpty()) {
+            LOG.trace("{}: detailed change follow", logContext);
+            for (int i = 0, size = changes.size(); i < size; ++i) {
+                LOG.trace("{}: change {}: {}", logContext, i, changes.get(i));
+            }
+        }
 
         notificationCount++;
 
         try {
-            this.listener.onDataTreeChanged(message.getChanges());
+            listener.onDataTreeChanged(changes);
         } catch (Exception e) {
-            LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+            LOG.error("{}: Error notifying listener {}", logContext, listener, e);
         }
 
         // TODO: do we really need this?
index 0268b8e36c41a43fb986d96663a7f12d28ff13a4..be849284e7f1fabdd04f9e3255e99a5912e53f48 100644
@@ -14,20 +14,20 @@ import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.dispatch.OnComplete;
 import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.Executor;
 import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
 
 /**
  * Proxy class for holding required state to lazily instantiate a listener registration with an
@@ -35,28 +35,61 @@ import scala.concurrent.Future;
  *
  * @param <T> listener type
  */
-final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+final class DataTreeChangeListenerProxy extends AbstractObjectRegistration<DOMDataTreeChangeListener> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
     private final ActorRef dataChangeListenerActor;
     private final ActorUtils actorUtils;
     private final YangInstanceIdentifier registeredPath;
+    private final boolean clustered;
 
     @GuardedBy("this")
     private ActorSelection listenerRegistrationActor;
 
-    DataTreeChangeListenerProxy(final ActorUtils actorUtils, final T listener,
-            final YangInstanceIdentifier registeredPath) {
+    @VisibleForTesting
+    private DataTreeChangeListenerProxy(final ActorUtils actorUtils, final DOMDataTreeChangeListener listener,
+            final YangInstanceIdentifier registeredPath, final boolean clustered, final String shardName) {
         super(listener);
         this.actorUtils = requireNonNull(actorUtils);
         this.registeredPath = requireNonNull(registeredPath);
-        this.dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
+        this.clustered = clustered;
+        dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
                 DataTreeChangeListenerActor.props(getInstance(), registeredPath)
                     .withDispatcher(actorUtils.getNotificationDispatcherPath()));
-
         LOG.debug("{}: Created actor {} for DTCL {}", actorUtils.getDatastoreContext().getLogicalStoreType(),
                 dataChangeListenerActor, listener);
     }
 
+    static @NonNull DataTreeChangeListenerProxy of(final ActorUtils actorUtils,
+            final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+            final boolean clustered, final String shardName) {
+        return ofTesting(actorUtils, listener, registeredPath, clustered, shardName, MoreExecutors.directExecutor());
+    }
+
+    @VisibleForTesting
+    static @NonNull DataTreeChangeListenerProxy ofTesting(final ActorUtils actorUtils,
+            final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+            final boolean clustered, final String shardName, final Executor executor) {
+        final var ret = new DataTreeChangeListenerProxy(actorUtils, listener, registeredPath, clustered, shardName);
+        executor.execute(() -> {
+            LOG.debug("{}: Starting discovery of shard {}", ret.logContext(), shardName);
+            actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<>() {
+                @Override
+                public void onComplete(final Throwable failure, final ActorRef shard) {
+                    if (failure instanceof LocalShardNotFoundException) {
+                        LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} cannot be "
+                            + "registered", ret.logContext(), shardName, listener, registeredPath);
+                    } else if (failure != null) {
+                        LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} cannot be "
+                            + "registered", ret.logContext(), shardName, listener, registeredPath, failure);
+                    } else {
+                        ret.doRegistration(shard);
+                    }
+                }
+            }, actorUtils.getClientDispatcher());
+        });
+        return ret;
+    }
+
     @Override
     protected synchronized void removeRegistration() {
         if (listenerRegistrationActor != null) {
@@ -68,27 +101,6 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
         dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
     }
 
-    void init(final String shardName) {
-        Future<ActorRef> findFuture = actorUtils.findLocalShardAsync(shardName);
-        findFuture.onComplete(new OnComplete<ActorRef>() {
-            @Override
-            public void onComplete(final Throwable failure, final ActorRef shard) {
-                if (failure instanceof LocalShardNotFoundException) {
-                    LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered", logContext(), shardName, getInstance(), registeredPath);
-                } else if (failure != null) {
-                    LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered", logContext(), shardName, getInstance(), registeredPath,
-                            failure);
-                } else {
-                    doRegistration(shard);
-                }
-            }
-        }, actorUtils.getClientDispatcher());
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void setListenerRegistrationActor(final ActorSelection actor) {
         if (actor == null) {
             LOG.debug("{}: Ignoring null actor on {}", logContext(), this);
@@ -97,7 +109,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
 
         synchronized (this) {
             if (!isClosed()) {
-                this.listenerRegistrationActor = actor;
+                listenerRegistrationActor = actor;
                 return;
             }
         }
@@ -106,28 +118,21 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
         actor.tell(CloseDataTreeNotificationListenerRegistration.getInstance(), null);
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void doRegistration(final ActorRef shard) {
-
-        Future<Object> future = actorUtils.executeOperationAsync(shard,
-                new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor,
-                        getInstance() instanceof ClusteredDOMDataTreeChangeListener),
-                actorUtils.getDatastoreContext().getShardInitializationTimeout());
-
-        future.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object result) {
-                if (failure != null) {
-                    LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
+        actorUtils.executeOperationAsync(shard,
+            new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor, clustered),
+            actorUtils.getDatastoreContext().getShardInitializationTimeout()).onComplete(new OnComplete<>() {
+                @Override
+                public void onComplete(final Throwable failure, final Object result) {
+                    if (failure != null) {
+                        LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
                             getInstance(), registeredPath, failure);
-                } else {
-                    RegisterDataTreeNotificationListenerReply reply = (RegisterDataTreeNotificationListenerReply)result;
-                    setListenerRegistrationActor(actorUtils.actorSelection(
-                            reply.getListenerRegistrationPath()));
+                    } else {
+                        setListenerRegistrationActor(actorUtils.actorSelection(
+                            ((RegisterDataTreeNotificationListenerReply) result).getListenerRegistrationPath()));
+                    }
                 }
-            }
-        }, actorUtils.getClientDispatcher());
+            }, actorUtils.getClientDispatcher());
     }
 
     @VisibleForTesting
index 09586b270b733830e3e6f0d08f1a3caf29833799..f5e1d1374b8b8923fee3b785c85d31b3a4778b12 100644
@@ -23,9 +23,9 @@ import org.slf4j.LoggerFactory;
 final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
 
-    private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+    private final Collection<DelayedDataTreeChangeListenerRegistration>
             delayedDataTreeChangeListenerRegistrations = ConcurrentHashMap.newKeySet();
-    private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+    private final Collection<DelayedDataTreeChangeListenerRegistration>
             delayedListenerOnAllRegistrations = ConcurrentHashMap.newKeySet();
     private final Collection<ActorSelection> leaderOnlyListenerActors = ConcurrentHashMap.newKeySet();
     private final Collection<ActorSelection> allListenerActors = ConcurrentHashMap.newKeySet();
@@ -62,8 +62,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         }
 
         if (hasLeader) {
-            for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
-                    delayedListenerOnAllRegistrations) {
+            for (var reg : delayedListenerOnAllRegistrations) {
                 reg.doRegistration(this);
             }
 
@@ -71,8 +70,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         }
 
         if (isLeader) {
-            for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
-                    delayedDataTreeChangeListenerRegistrations) {
+            for (var reg : delayedDataTreeChangeListenerRegistrations) {
                 reg.doRegistration(this);
             }
 
@@ -91,9 +89,8 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         } else {
             LOG.debug("{}: Shard does not have a leader - delaying registration", persistenceId());
 
-            final DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> delayedReg =
-                    new DelayedDataTreeChangeListenerRegistration<>(message, registrationActor);
-            final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>> delayedRegList;
+            final var delayedReg = new DelayedDataTreeChangeListenerRegistration(message, registrationActor);
+            final Collection<DelayedDataTreeChangeListenerRegistration> delayedRegList;
             if (message.isRegisterOnAllInstances()) {
                 delayedRegList = delayedListenerOnAllRegistrations;
             } else {
index 9c0d1ca569122629237190ca9d9621eefd455d67..9efca6493716ed72ec030610fb67ebd8691b2ac3 100644
@@ -14,7 +14,6 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
@@ -30,7 +29,7 @@ import org.opendaylight.mdsal.common.api.ThreePhaseCommitStep;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 /**
  * Proxy actor which acts as a facade to the user-provided commit cohort. Responsible for
@@ -90,10 +89,10 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
 
         private final Collection<DOMDataTreeCandidate> candidates;
         private final ActorRef cohort;
-        private final SchemaContext schema;
+        private final EffectiveModelContext schema;
 
         CanCommit(final TransactionIdentifier txId, final Collection<DOMDataTreeCandidate> candidates,
-                final SchemaContext schema, final ActorRef cohort) {
+                final EffectiveModelContext schema, final ActorRef cohort) {
             super(txId);
             this.cohort = Objects.requireNonNull(cohort);
             this.candidates = Objects.requireNonNull(candidates);
@@ -104,7 +103,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
             return candidates;
         }
 
-        SchemaContext getSchema() {
+        EffectiveModelContext getSchema() {
             return schema;
         }
 
@@ -208,15 +207,11 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
             }, callbackExecutor);
         }
 
-        @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-                justification = "https://github.com/spotbugs/spotbugs/issues/811")
         private void failed(final TransactionIdentifier txId, final ActorRef sender, final Throwable failure) {
             currentStateMap.remove(txId);
             sender.tell(new Status.Failure(failure), getSelf());
         }
 
-        @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-                justification = "https://github.com/spotbugs/spotbugs/issues/811")
         private void success(final TransactionIdentifier txId, final ActorRef sender, final S nextStep) {
             currentStateMap.computeIfPresent(txId, (key, behaviour) -> nextBehaviour(txId, nextStep));
             sender.tell(new Success(getSelf(), txId), getSelf());
index 3ff6a9f0e61ba90e7e17a9f2a6b0bf34a7a3a818..fa10f947db49d7c9f73e369ca568f5bbdcbb9ac2 100644
@@ -26,27 +26,24 @@ import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeSnapshot;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Registry of user commit cohorts, which is responsible for handling registration and calculation
  * of affected cohorts based on {@link DataTreeCandidate}. This class is NOT thread-safe.
- *
  */
 class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
 
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortActorRegistry.class);
 
-    private final Map<ActorRef, RegistrationTreeNode<ActorRef>> cohortToNode = new HashMap<>();
+    private final Map<ActorRef, Node<ActorRef>> cohortToNode = new HashMap<>();
 
     Collection<ActorRef> getCohortActors() {
         return new ArrayList<>(cohortToNode.keySet());
@@ -57,8 +54,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         takeLock();
         try {
             final ActorRef cohortRef = cohort.getCohort();
-            final RegistrationTreeNode<ActorRef> node =
-                    findNodeFor(cohort.getPath().getRootIdentifier().getPathArguments());
+            final Node<ActorRef> node = findNodeFor(cohort.getPath().path().getPathArguments());
             addRegistration(node, cohort.getCohort());
             cohortToNode.put(cohortRef, node);
         } catch (final Exception e) {
@@ -72,7 +68,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
 
     void removeCommitCohort(final ActorRef sender, final RemoveCohort message) {
         final ActorRef cohort = message.getCohort();
-        final RegistrationTreeNode<ActorRef> node = cohortToNode.get(cohort);
+        final Node<ActorRef> node = cohortToNode.get(cohort);
         if (node != null) {
             removeRegistration(node, cohort);
             cohortToNode.remove(cohort);
@@ -82,8 +78,8 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
     }
 
     List<DataTreeCohortActor.CanCommit> createCanCommitMessages(final TransactionIdentifier txId,
-            final DataTreeCandidate candidate, final SchemaContext schema) {
-        try (RegistrationTreeSnapshot<ActorRef> cohorts = takeSnapshot()) {
+            final DataTreeCandidate candidate, final EffectiveModelContext schema) {
+        try (var cohorts = takeSnapshot()) {
             return new CanCommitMessageBuilder(txId, candidate, schema).perform(cohorts.getRootNode());
         }
     }
@@ -131,54 +127,52 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         private final Multimap<ActorRef, DOMDataTreeCandidate> actorToCandidates = ArrayListMultimap.create();
         private final TransactionIdentifier txId;
         private final DataTreeCandidate candidate;
-        private final SchemaContext schema;
+        private final EffectiveModelContext schema;
 
         CanCommitMessageBuilder(final TransactionIdentifier txId, final DataTreeCandidate candidate,
-                final SchemaContext schema) {
+                final EffectiveModelContext schema) {
             this.txId = requireNonNull(txId);
             this.candidate = requireNonNull(candidate);
             this.schema = schema;
         }
 
         private void lookupAndCreateCanCommits(final List<PathArgument> args, final int offset,
-                final RegistrationTreeNode<ActorRef> node) {
+                final Node<ActorRef> node) {
 
             if (args.size() != offset) {
                 final PathArgument arg = args.get(offset);
-                final RegistrationTreeNode<ActorRef> exactChild = node.getExactChild(arg);
+                final var exactChild = node.getExactChild(arg);
                 if (exactChild != null) {
                     lookupAndCreateCanCommits(args, offset + 1, exactChild);
                 }
-                for (final RegistrationTreeNode<ActorRef> c : node.getInexactChildren(arg)) {
-                    lookupAndCreateCanCommits(args, offset + 1, c);
+                for (var inexact : node.getInexactChildren(arg)) {
+                    lookupAndCreateCanCommits(args, offset + 1, inexact);
                 }
             } else {
                 lookupAndCreateCanCommits(candidate.getRootPath(), node, candidate.getRootNode());
             }
         }
 
-        private void lookupAndCreateCanCommits(final YangInstanceIdentifier path,
-                final RegistrationTreeNode<ActorRef> regNode, final DataTreeCandidateNode candNode) {
-            if (candNode.getModificationType() == ModificationType.UNMODIFIED) {
+        private void lookupAndCreateCanCommits(final YangInstanceIdentifier path, final Node<ActorRef> regNode,
+                final DataTreeCandidateNode candNode) {
+            if (candNode.modificationType() == ModificationType.UNMODIFIED) {
                 LOG.debug("Skipping unmodified candidate {}", path);
                 return;
             }
-            final Collection<ActorRef> regs = regNode.getRegistrations();
+            final var regs = regNode.getRegistrations();
             if (!regs.isEmpty()) {
                 createCanCommits(regs, path, candNode);
             }
 
-            for (final DataTreeCandidateNode candChild : candNode.getChildNodes()) {
-                if (candChild.getModificationType() != ModificationType.UNMODIFIED) {
-                    final RegistrationTreeNode<ActorRef> regChild =
-                            regNode.getExactChild(candChild.getIdentifier());
+            for (var candChild : candNode.childNodes()) {
+                if (candChild.modificationType() != ModificationType.UNMODIFIED) {
+                    final var regChild = regNode.getExactChild(candChild.name());
                     if (regChild != null) {
-                        lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), regChild, candChild);
+                        lookupAndCreateCanCommits(path.node(candChild.name()), regChild, candChild);
                     }
 
-                    for (final RegistrationTreeNode<ActorRef> rc : regNode
-                            .getInexactChildren(candChild.getIdentifier())) {
-                        lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), rc, candChild);
+                    for (var rc : regNode.getInexactChildren(candChild.name())) {
+                        lookupAndCreateCanCommits(path.node(candChild.name()), rc, candChild);
                     }
                 }
             }
@@ -193,11 +187,11 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         }
 
         private static DOMDataTreeIdentifier treeIdentifier(final YangInstanceIdentifier path) {
-            return new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, path);
+            return DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path);
         }
 
-        List<DataTreeCohortActor.CanCommit> perform(final RegistrationTreeNode<ActorRef> rootNode) {
-            final List<PathArgument> toLookup = candidate.getRootPath().getPathArguments();
+        List<DataTreeCohortActor.CanCommit> perform(final Node<ActorRef> rootNode) {
+            final var toLookup = candidate.getRootPath().getPathArguments();
             lookupAndCreateCanCommits(toLookup, 0, rootNode);
 
             final Map<ActorRef, Collection<DOMDataTreeCandidate>> mapView = actorToCandidates.asMap();
@@ -210,7 +204,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         }
     }
 
-    CompositeDataTreeCohort createCohort(final SchemaContext schemaContext, final TransactionIdentifier txId,
+    CompositeDataTreeCohort createCohort(final EffectiveModelContext schemaContext, final TransactionIdentifier txId,
             final Executor callbackExecutor, final Timeout commitStepTimeout) {
         return new CompositeDataTreeCohort(this, txId, schemaContext, callbackExecutor, commitStepTimeout);
     }
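
The registry code above also tracks the yangtools DataTreeCandidateNode accessor renames, where getIdentifier(), getModificationType() and getChildNodes() become name(), modificationType() and childNodes(). A small sketch of a recursive walk using the new spellings, with a class and method name of our own:

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
    import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;

    final class CandidateWalker {
        // Visits every modified node under the candidate root, mirroring the
        // traversal style of CanCommitMessageBuilder above.
        static void walk(final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
            if (node.modificationType() == ModificationType.UNMODIFIED) {
                return;
            }
            System.out.println("modified: " + path);
            for (var child : node.childNodes()) {
                walk(path.node(child.name()), child);
            }
        }
    }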
index e5f4ceaa7e105770545d9be6f3951f3e34e39c44..4e3c6cb8d77bc0043dc883b29e0467bc120e16c2 100644
@@ -13,13 +13,11 @@ import akka.actor.ActorRef;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.concurrent.TimeUnit;
 import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.slf4j.Logger;
@@ -27,11 +25,10 @@ import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C>
-        implements DOMDataTreeCommitCohortRegistration<C> {
-
+public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortRegistrationProxy.class);
     private static final Timeout TIMEOUT = new Timeout(new FiniteDuration(5, TimeUnit.SECONDS));
+
     private final DOMDataTreeIdentifier subtree;
     private final ActorRef actor;
     private final ActorUtils actorUtils;
@@ -43,8 +40,8 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         super(cohort);
         this.subtree = requireNonNull(subtree);
         this.actorUtils = requireNonNull(actorUtils);
-        this.actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
-                subtree.getRootIdentifier()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
+        actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
+                subtree.path()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
     }
 
     public void init(final String shardName) {
@@ -66,8 +63,6 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         }, actorUtils.getClientDispatcher());
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private synchronized void performRegistration(final ActorRef shard) {
         if (isClosed()) {
             return;
@@ -75,7 +70,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         cohortRegistry = shard;
         Future<Object> future =
                 Patterns.ask(shard, new DataTreeCohortActorRegistry.RegisterCohort(subtree, actor), TIMEOUT);
-        future.onComplete(new OnComplete<Object>() {
+        future.onComplete(new OnComplete<>() {
 
             @Override
             public void onComplete(final Throwable failure, final Object val) {
index 28fb89d2be3a9756523094f29d5904d6c6f82e3e..24b37751272b68741ab9e296b835244e6b337944 100644
@@ -24,7 +24,7 @@ import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.PeerAddressResolver;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,7 +65,7 @@ public class DatastoreContext implements ClientActorConfig {
     public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 1000;
     public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS =
             TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES);
-    public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 2048 * 1000; // 2MB
+    public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
     public static final int DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY = 512;
     public static final ExportOnRecovery DEFAULT_EXPORT_ON_RECOVERY = ExportOnRecovery.Off;
     public static final String DEFAULT_RECOVERY_EXPORT_BASE_DIR = "persistence-export";
@@ -92,11 +92,10 @@ public class DatastoreContext implements ClientActorConfig {
     private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
     private String dataStoreName = UNKNOWN_DATA_STORE_TYPE;
     private LogicalDatastoreType logicalStoreType = LogicalDatastoreType.OPERATIONAL;
-    private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.empty();
+    private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.of();
     private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
     private boolean writeOnlyTransactionOptimizationsEnabled = true;
     private long shardCommitQueueExpiryTimeoutInMillis = DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS;
-    private boolean useTellBasedProtocol = false;
     private boolean transactionDebugContextEnabled = false;
     private String shardManagerPersistenceId;
     private int maximumMessageSliceSize = DEFAULT_MAX_MESSAGE_SLICE_SIZE;
@@ -127,34 +126,33 @@ public class DatastoreContext implements ClientActorConfig {
     }
 
     private DatastoreContext(final DatastoreContext other) {
-        this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
-        this.operationTimeoutInMillis = other.operationTimeoutInMillis;
-        this.dataStoreMXBeanType = other.dataStoreMXBeanType;
-        this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
-        this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
-        this.shardInitializationTimeout = other.shardInitializationTimeout;
-        this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
-        this.initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier;
-        this.persistent = other.persistent;
-        this.snapshotOnRootOverwrite = other.snapshotOnRootOverwrite;
-        this.configurationReader = other.configurationReader;
-        this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
-        this.dataStoreName = other.dataStoreName;
-        this.logicalStoreType = other.logicalStoreType;
-        this.storeRoot = other.storeRoot;
-        this.shardBatchedModificationCount = other.shardBatchedModificationCount;
-        this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
-        this.shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
-        this.transactionDebugContextEnabled = other.transactionDebugContextEnabled;
-        this.shardManagerPersistenceId = other.shardManagerPersistenceId;
-        this.useTellBasedProtocol = other.useTellBasedProtocol;
-        this.backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
-        this.requestTimeout = other.requestTimeout;
-        this.noProgressTimeout = other.noProgressTimeout;
-        this.initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
-        this.useLz4Compression = other.useLz4Compression;
-        this.exportOnRecovery = other.exportOnRecovery;
-        this.recoveryExportBaseDir = other.recoveryExportBaseDir;
+        shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+        operationTimeoutInMillis = other.operationTimeoutInMillis;
+        dataStoreMXBeanType = other.dataStoreMXBeanType;
+        shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+        shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+        shardInitializationTimeout = other.shardInitializationTimeout;
+        shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+        initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier;
+        persistent = other.persistent;
+        snapshotOnRootOverwrite = other.snapshotOnRootOverwrite;
+        configurationReader = other.configurationReader;
+        transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+        dataStoreName = other.dataStoreName;
+        logicalStoreType = other.logicalStoreType;
+        storeRoot = other.storeRoot;
+        shardBatchedModificationCount = other.shardBatchedModificationCount;
+        writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
+        shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
+        transactionDebugContextEnabled = other.transactionDebugContextEnabled;
+        shardManagerPersistenceId = other.shardManagerPersistenceId;
+        backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
+        requestTimeout = other.requestTimeout;
+        noProgressTimeout = other.noProgressTimeout;
+        initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
+        useLz4Compression = other.useLz4Compression;
+        exportOnRecovery = other.exportOnRecovery;
+        recoveryExportBaseDir = other.recoveryExportBaseDir;
 
         setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
         setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
@@ -167,7 +165,6 @@ public class DatastoreContext implements ClientActorConfig {
         setCandidateElectionTimeoutDivisor(other.raftConfig.getCandidateElectionTimeoutDivisor());
         setCustomRaftPolicyImplementation(other.raftConfig.getCustomRaftPolicyImplementationClass());
         setMaximumMessageSliceSize(other.getMaximumMessageSliceSize());
-        setShardSnapshotChunkSize(other.raftConfig.getSnapshotChunkSize());
         setPeerAddressResolver(other.raftConfig.getPeerAddressResolver());
         setTempFileDirectory(other.getTempFileDirectory());
         setFileBackedStreamingThreshold(other.getFileBackedStreamingThreshold());
@@ -229,7 +226,7 @@ public class DatastoreContext implements ClientActorConfig {
     }
 
     public boolean isSnapshotOnRootOverwrite() {
-        return this.snapshotOnRootOverwrite;
+        return snapshotOnRootOverwrite;
     }
 
     public AkkaConfigurationReader getConfigurationReader() {
@@ -331,17 +328,8 @@ public class DatastoreContext implements ClientActorConfig {
         raftConfig.setRecoverySnapshotIntervalSeconds(recoverySnapshotInterval);
     }
 
-    @Deprecated
-    private void setShardSnapshotChunkSize(final int shardSnapshotChunkSize) {
-        // We'll honor the shardSnapshotChunkSize setting for backwards compatibility but only if it doesn't exceed
-        // maximumMessageSliceSize.
-        if (shardSnapshotChunkSize < maximumMessageSliceSize) {
-            raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize);
-        }
-    }
-
     private void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
-        raftConfig.setSnapshotChunkSize(maximumMessageSliceSize);
+        raftConfig.setMaximumMessageSliceSize(maximumMessageSliceSize);
         this.maximumMessageSliceSize = maximumMessageSliceSize;
     }
 
@@ -365,10 +353,6 @@ public class DatastoreContext implements ClientActorConfig {
         return transactionDebugContextEnabled;
     }
 
-    public boolean isUseTellBasedProtocol() {
-        return useTellBasedProtocol;
-    }
-
     public boolean isUseLz4Compression() {
         return useLz4Compression;
     }
@@ -405,7 +389,7 @@ public class DatastoreContext implements ClientActorConfig {
         return initialPayloadSerializedBufferCapacity;
     }
 
-    public static class Builder implements org.opendaylight.yangtools.concepts.Builder<DatastoreContext> {
+    public static class Builder {
         private final DatastoreContext datastoreContext;
 
         Builder(final DatastoreContext datastoreContext) {
@@ -602,11 +586,6 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        public Builder useTellBasedProtocol(final boolean value) {
-            datastoreContext.useTellBasedProtocol = value;
-            return this;
-        }
-
         public Builder useLz4Compression(final boolean value) {
             datastoreContext.useLz4Compression = value;
             return this;
@@ -636,14 +615,6 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        @Deprecated
-        public Builder shardSnapshotChunkSize(final int shardSnapshotChunkSize) {
-            LOG.warn("The shard-snapshot-chunk-size configuration parameter is deprecated - "
-                    + "use maximum-message-slice-size instead");
-            datastoreContext.setShardSnapshotChunkSize(shardSnapshotChunkSize);
-            return this;
-        }
-
         public Builder maximumMessageSliceSize(final int maximumMessageSliceSize) {
             datastoreContext.setMaximumMessageSliceSize(maximumMessageSliceSize);
             return this;
@@ -689,7 +660,6 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        @Override
         public DatastoreContext build() {
             if (datastoreContext.dataStoreName != null) {
                 GLOBAL_DATASTORE_NAMES.add(datastoreContext.dataStoreName);
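
With shard-snapshot-chunk-size and the tell/ask switch gone, message sizing is configured through the single maximum-message-slice-size knob. A minimal usage sketch follows; the newBuilder() factory is assumed from the rest of DatastoreContext (it is not shown in these hunks), while maximumMessageSliceSize(int) and build() are the Builder methods visible above.

    // Hedged sketch: newBuilder() assumed, the rest mirrors the Builder shown in the diff.
    DatastoreContext context = DatastoreContext.newBuilder()
            .maximumMessageSliceSize(480 * 1024)
            .build();
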
index 0ee005a708c8d4c660ee3df2980b08045bb964d6..ac50ff30a26e961f81517ac365441e578d1c51e0 100644 (file)
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Optional;
 import java.util.Set;
 import java.util.function.Function;
 import javax.management.ConstructorParameters;
@@ -31,8 +30,8 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.text.WordUtils;
 import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
 import org.opendaylight.yangtools.yang.common.Uint16;
 import org.opendaylight.yangtools.yang.common.Uint32;
 import org.opendaylight.yangtools.yang.common.Uint64;
@@ -84,7 +83,7 @@ public class DatastoreContextIntrospector {
     private static void introspectPrimitiveTypes() {
         final Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
                 Primitives.allWrapperTypes()).add(String.class).build();
-        for (final Class<?> primitive: primitives) {
+        for (final Class<?> primitive : primitives) {
             try {
                 processPropertyType(primitive);
             } catch (final NoSuchMethodException e) {
@@ -175,7 +174,7 @@ public class DatastoreContextIntrospector {
             // This must be a yang-defined type. We need to find the constructor that takes a
             // primitive as the only argument. This will be used to construct instances to perform
             // validation (eg range checking). The yang-generated types have a couple single-argument
-            // constructors but the one we want has the bean ConstructorProperties annotation.
+            // constructors but the one we want has the ConstructorParameters annotation.
             for (final Constructor<?> ctor: propertyType.getConstructors()) {
                 final ConstructorParameters ctorParAnnotation = ctor.getAnnotation(ConstructorParameters.class);
                 if (ctor.getParameterCount() == 1 && ctorParAnnotation != null) {
@@ -382,9 +381,8 @@ public class DatastoreContextIntrospector {
         if (propertyType.isEnum()) {
             try {
                 final Method enumConstructor = propertyType.getDeclaredMethod("forName", String.class);
-                final Object optional =  enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT));
-                if (optional instanceof Optional) {
-                    return ((Optional<Object>)optional).orElseThrow();
+                if (enumConstructor.getReturnType().equals(propertyType)) {
+                    return enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT));
                 }
             } catch (NoSuchMethodException e) {
                 LOG.error("Error constructing value ({}) for enum {}", from, propertyType);
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index afb5773..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * An AbstractThreePhaseCommitCohort implementation used for debugging. If a failure occurs, the transaction
- * call site is printed.
- *
- * @author Thomas Pantelis
- */
-class DebugThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
-    private static final Logger LOG = LoggerFactory.getLogger(DebugThreePhaseCommitCohort.class);
-
-    private final AbstractThreePhaseCommitCohort<?> delegate;
-    private final Throwable debugContext;
-    private final TransactionIdentifier transactionId;
-
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL")
-    private Logger log = LOG;
-
-    DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId,
-            final AbstractThreePhaseCommitCohort<?> delegate, final Throwable debugContext) {
-        this.delegate = requireNonNull(delegate);
-        this.debugContext = requireNonNull(debugContext);
-        this.transactionId = requireNonNull(transactionId);
-    }
-
-    private <V> ListenableFuture<V> addFutureCallback(final ListenableFuture<V> future) {
-        Futures.addCallback(future, new FutureCallback<V>() {
-            @Override
-            public void onSuccess(final V result) {
-                // no-op
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                log.warn("Transaction {} failed with error \"{}\" - was allocated in the following context",
-                        transactionId, failure, debugContext);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return future;
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return addFutureCallback(delegate.canCommit());
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return addFutureCallback(delegate.preCommit());
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return addFutureCallback(delegate.commit());
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate.abort();
-    }
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return ((AbstractThreePhaseCommitCohort)delegate).getCohortFutures();
-    }
-
-    @VisibleForTesting
-    void setLogger(final Logger logger) {
-        this.log = logger;
-    }
-}
index 720aadb175dccf29aea8895d6ec2ab60b08ba964..d7d90474f33ad244688e150a0a54d5835c9859c0 100644 (file)
@@ -7,15 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import java.util.Collection;
+import java.util.List;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
 import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,38 +45,34 @@ final class DefaultShardDataTreeChangeListenerPublisher extends AbstractDOMStore
     }
 
     @Override
-    protected void notifyListener(final AbstractDOMDataTreeChangeListenerRegistration<?> registration,
-            final Collection<DataTreeCandidate> changes) {
-        LOG.debug("{}: notifyListener: listener: {}", logContext, registration.getInstance());
-        registration.getInstance().onDataTreeChanged(changes);
+    protected void notifyListener(final Reg registration, final List<DataTreeCandidate> changes) {
+        final var listener = registration.listener();
+        LOG.debug("{}: notifyListener: listener: {}", logContext, listener);
+        listener.onDataTreeChanged(changes);
     }
 
     @Override
-    protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
+    protected void registrationRemoved(final Reg registration) {
         LOG.debug("Registration {} removed", registration);
     }
 
     @Override
     public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
             final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> initialState,
-            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+            final Consumer<Registration> onRegistration) {
         registerTreeChangeListener(treeId, listener, onRegistration);
 
         if (initialState.isPresent()) {
-            notifySingleListener(treeId, listener, initialState.get(), logContext);
+            notifySingleListener(treeId, listener, initialState.orElseThrow(), logContext);
         } else {
             listener.onInitialData();
         }
     }
 
     void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
-            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+            final Consumer<Registration> onRegistration) {
         LOG.debug("{}: registerTreeChangeListener: path: {}, listener: {}", logContext, treeId, listener);
-
-        AbstractDOMDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> registration =
-                super.registerTreeChangeListener(treeId, listener);
-
-        onRegistration.accept(registration);
+        onRegistration.accept(super.registerTreeChangeListener(treeId, listener));
     }
 
     static void notifySingleListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
index ef26e94ee97f84edd4ea4de2648e0274c0f4df32..740aef92b8d38bf61ab8e59316f20397efd4ace0 100644 (file)
@@ -8,12 +8,11 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
-import java.util.EventListener;
 import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 
-class DelayedDataTreeChangeListenerRegistration<L extends EventListener> implements ListenerRegistration<L> {
+class DelayedDataTreeChangeListenerRegistration implements Registration {
     private final RegisterDataTreeChangeListener registrationMessage;
     private final ActorRef registrationActor;
 
@@ -32,17 +31,6 @@ class DelayedDataTreeChangeListenerRegistration<L extends EventListener> impleme
         }
     }
 
-    @Override
-    public L getInstance() {
-        // ObjectRegistration annotates this method as @Nonnull but we could return null if the delegate is not set yet.
-        // In reality, we do not and should not ever call this method on DelayedDataTreeChangeListenerRegistration
-        // instances anyway but, since we have to provide an implementation to satisfy the interface, we throw
-        // UnsupportedOperationException to honor the API contract of not returning null and to avoid a FindBugs error
-        // for possibly returning null.
-        throw new UnsupportedOperationException(
-                "getInstance should not be called on this instance since it could be null");
-    }
-
     @Override
     public synchronized void close() {
         closed = true;
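
The registration type drops its listener accessor: callers now get a close-only handle and are expected to keep their own reference to the listener, which removes the awkward "getInstance() may not be set yet" case documented above. An illustrative shape of the two contracts, not the actual yangtools interfaces:

    // Before: the handle exposed the listener, inviting null/unsupported corner cases.
    interface OldListenerRegistration<L> extends AutoCloseable {
        L getInstance();
        @Override
        void close();
    }

    // After: a plain close-only handle, matching Registration in spirit.
    interface CloseOnlyRegistration extends AutoCloseable {
        @Override
        void close();
    }
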
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapper.java
deleted file mode 100644 (file)
index 17df235..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * Delayed implementation of TransactionContextWrapper. Operations destined for the target
- * TransactionContext instance are cached until the TransactionContext instance becomes
- * available at which time they are executed.
- *
- * @author Thomas Pantelis
- */
-final class DelayedTransactionContextWrapper extends AbstractTransactionContextWrapper {
-    private static final Logger LOG = LoggerFactory.getLogger(DelayedTransactionContextWrapper.class);
-
-    /**
-     * The list of transaction operations to execute once the TransactionContext becomes available.
-     */
-    @GuardedBy("queuedTxOperations")
-    private final List<Entry<TransactionOperation, Boolean>> queuedTxOperations = new ArrayList<>();
-
-    /**
-     * The resulting TransactionContext.
-     */
-    private volatile TransactionContext transactionContext;
-    @GuardedBy("queuedTxOperations")
-    private TransactionContext deferredTransactionContext;
-    @GuardedBy("queuedTxOperations")
-    private boolean pendingEnqueue;
-
-    DelayedTransactionContextWrapper(@NonNull final TransactionIdentifier identifier,
-                                     @NonNull final ActorUtils actorUtils, @NonNull final String shardName) {
-        super(identifier, actorUtils, shardName);
-    }
-
-    @Override
-    TransactionContext getTransactionContext() {
-        return transactionContext;
-    }
-
-    @Override
-    void maybeExecuteTransactionOperation(final TransactionOperation op) {
-        final TransactionContext localContext = transactionContext;
-        if (localContext != null) {
-            op.invoke(localContext, null);
-        } else {
-            // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
-            // callback to be executed after the Tx is created.
-            enqueueTransactionOperation(op);
-        }
-    }
-
-    @Override
-    Future<ActorSelection> readyTransaction(final Optional<SortedSet<String>> participatingShardNames) {
-        // avoid the creation of a promise and a TransactionOperation
-        final TransactionContext localContext = transactionContext;
-        if (localContext != null) {
-            return localContext.readyTransaction(null, participatingShardNames);
-        }
-
-        final Promise<ActorSelection> promise = Futures.promise();
-        enqueueTransactionOperation(new TransactionOperation() {
-            @Override
-            public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
-                promise.completeWith(newTransactionContext.readyTransaction(havePermit, participatingShardNames));
-            }
-        });
-
-        return promise.future();
-    }
-
-    /**
-     * Adds a TransactionOperation to be executed once the TransactionContext becomes available. This method is called
-     * only after the caller has checked (without synchronizing with executePriorTransactionOperations()) that the
-     * context is not available.
-     */
-    private void enqueueTransactionOperation(final TransactionOperation operation) {
-        // We have three things to do here:
-        // - synchronize with executePriorTransactionOperations() so that logical operation ordering is maintained
-        // - acquire a permit for the operation if we still need to enqueue it
-        // - enqueue the operation
-        //
-        // Since each operation needs to acquire a permit exactly once and the limiter is shared between us and the
-        // TransactionContext, we need to know whether an operation has a permit before we enqueue it. Further
-        // complications are:
-        // - this method may be called from the thread invoking executePriorTransactionOperations()
-        // - user may be violating API contract of using the transaction from a single thread
-
-        // As a first step, we will synchronize on the queue and check if the handoff has completed. While we have
-        // the lock, we will assert that we will be enqueing another operation.
-        final TransactionContext contextOnEntry;
-        synchronized (queuedTxOperations) {
-            contextOnEntry = transactionContext;
-            if (contextOnEntry == null) {
-                checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected", getIdentifier());
-                pendingEnqueue = true;
-            }
-        }
-
-        // Short-circuit if there is a context
-        if (contextOnEntry != null) {
-            operation.invoke(transactionContext, null);
-            return;
-        }
-
-        boolean cleanupEnqueue = true;
-        TransactionContext finishHandoff = null;
-        try {
-            // Acquire the permit,
-            final boolean havePermit = getLimiter().acquire();
-            if (!havePermit) {
-                LOG.warn("Failed to acquire enqueue operation permit for transaction {} on shard {}", getIdentifier(),
-                        getShardName());
-            }
-
-            // Ready to enqueue, take the lock again and append the operation
-            synchronized (queuedTxOperations) {
-                LOG.debug("Tx {} Queuing TransactionOperation", getIdentifier());
-                queuedTxOperations.add(new SimpleImmutableEntry<>(operation, havePermit));
-                pendingEnqueue = false;
-                cleanupEnqueue = false;
-                finishHandoff = deferredTransactionContext;
-                deferredTransactionContext = null;
-            }
-        } finally {
-            if (cleanupEnqueue) {
-                synchronized (queuedTxOperations) {
-                    pendingEnqueue = false;
-                    finishHandoff = deferredTransactionContext;
-                    deferredTransactionContext = null;
-                }
-            }
-            if (finishHandoff != null) {
-                executePriorTransactionOperations(finishHandoff);
-            }
-        }
-    }
-
-    void executePriorTransactionOperations(final TransactionContext localTransactionContext) {
-        while (true) {
-            // Access to queuedTxOperations and transactionContext must be protected and atomic
-            // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
-            // issues and ensure no TransactionOperation is missed and that they are processed
-            // in the order they occurred.
-
-            // We'll make a local copy of the queuedTxOperations list to handle re-entrancy
-            // in case a TransactionOperation results in another transaction operation being
-            // queued (eg a put operation from a client read Future callback that is notified
-            // synchronously).
-            final Collection<Entry<TransactionOperation, Boolean>> operationsBatch;
-            synchronized (queuedTxOperations) {
-                if (queuedTxOperations.isEmpty()) {
-                    if (!pendingEnqueue) {
-                        // We're done invoking the TransactionOperations so we can now publish the TransactionContext.
-                        localTransactionContext.operationHandOffComplete();
-
-                        // This is null-to-non-null transition after which we are releasing the lock and not doing
-                        // any further processing.
-                        transactionContext = localTransactionContext;
-                    } else {
-                        deferredTransactionContext = localTransactionContext;
-                    }
-                    return;
-                }
-
-                operationsBatch = new ArrayList<>(queuedTxOperations);
-                queuedTxOperations.clear();
-            }
-
-            // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking. A slight down-side is
-            // that we need to re-acquire the lock below but this should be negligible.
-            for (Entry<TransactionOperation, Boolean> oper : operationsBatch) {
-                final Boolean permit = oper.getValue();
-                if (permit.booleanValue() && !localTransactionContext.usesOperationLimiting()) {
-                    // If the context is not using limiting we need to release operations as we are queueing them, so
-                    // user threads are not charged for them.
-                    getLimiter().release();
-                }
-                oper.getKey().invoke(localTransactionContext, permit);
-            }
-        }
-    }
-
-}
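
The deleted wrapper's comments describe its core technique: buffer operations while the backend TransactionContext is still being created, then replay them in order once it arrives, and short-circuit to direct invocation afterwards. A heavily simplified, generic sketch of that pattern follows; it drops the permit accounting, re-entrancy handling and out-of-lock invocation the real code needed, and the class name is illustrative.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    // Simplified "queue until the delegate arrives" pattern (illustrative only).
    final class DeferredExecutor<T> {
        private final List<Consumer<T>> queued = new ArrayList<>();
        private volatile T delegate;

        void execute(final Consumer<T> op) {
            final T local = delegate;
            if (local != null) {
                // Fast path: the delegate is already available.
                op.accept(local);
                return;
            }
            synchronized (queued) {
                if (delegate == null) {
                    // Still waiting: remember the operation for later replay.
                    queued.add(op);
                    return;
                }
            }
            // The delegate appeared while we were waiting for the lock.
            op.accept(delegate);
        }

        void complete(final T result) {
            synchronized (queued) {
                // Publish the delegate and replay queued operations in order.
                delegate = result;
                queued.forEach(op -> op.accept(result));
                queued.clear();
            }
        }
    }
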
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapper.java
deleted file mode 100644 (file)
index f004088..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import scala.concurrent.Future;
-
-/**
- * Direct implementation of TransactionContextWrapper. Operation are executed directly on TransactionContext. Always
- * has completed context and executes on local shard.
- */
-final class DirectTransactionContextWrapper extends AbstractTransactionContextWrapper {
-    private final TransactionContext transactionContext;
-
-    DirectTransactionContextWrapper(@NonNull final TransactionIdentifier identifier,
-                                    @NonNull final ActorUtils actorUtils,
-                                    @NonNull final String shardName,
-                                    @NonNull final TransactionContext transactionContext) {
-        super(identifier, actorUtils, shardName);
-        this.transactionContext = requireNonNull(transactionContext);
-    }
-
-    @Override
-    TransactionContext getTransactionContext() {
-        return transactionContext;
-    }
-
-    @Override
-    void maybeExecuteTransactionOperation(final TransactionOperation op) {
-        op.invoke(transactionContext, null);
-    }
-
-    @Override
-    Future<ActorSelection> readyTransaction(final Optional<SortedSet<String>> participatingShardNames) {
-        return transactionContext.readyTransaction(null, participatingShardNames);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
deleted file mode 100644 (file)
index 16198ff..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import com.google.common.annotations.VisibleForTesting;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Implements a distributed DOMStore using Akka Patterns.ask().
- */
-public class DistributedDataStore extends AbstractDataStore {
-
-    private final TransactionContextFactory txContextFactory;
-
-    public DistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
-            final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
-            final DatastoreSnapshot restoreFromSnapshot) {
-        super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
-        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
-    }
-
-    @VisibleForTesting
-    DistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
-        super(actorUtils, identifier);
-        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
-    }
-
-
-    @Override
-    public DOMStoreTransactionChain createTransactionChain() {
-        return txContextFactory.createTransactionChain();
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
-    }
-
-    @Override
-    public void close() {
-        txContextFactory.close();
-        super.close();
-    }
-}
index 221db2faea0ff6157db7f609f9af9179aecfec6b..350b915b579cf724c181db200905da4fe3981f0d 100644 (file)
@@ -7,12 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import akka.actor.ActorSystem;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,7 +43,7 @@ public final class DistributedDataStoreFactory {
 
         updater.setListener(dataStore);
 
-        schemaService.registerSchemaContextListener(dataStore);
+        schemaService.registerSchemaContextListener(dataStore::onModelContextUpdated);
 
         dataStore.setCloseable(updater);
         dataStore.waitTillReady();
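
registerSchemaContextListener() is now handed a method reference, which implies the listener parameter is a functional interface; under that assumption any callback of the same shape can be registered, for example a logging lambda:

    // Hedged sketch: alternative callback with the same shape as
    // dataStore::onModelContextUpdated above.
    schemaService.registerSchemaContextListener(
        modelContext -> LOG.debug("Model context updated: {}", modelContext));
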
@@ -60,8 +58,8 @@ public final class DistributedDataStoreFactory {
         final String datastoreName = initialDatastoreContext.getDataStoreName();
         LOG.info("Create data store instance of type : {}", datastoreName);
 
-        final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
-        final DatastoreSnapshot restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null);
+        final var actorSystem = actorSystemProvider.getActorSystem();
+        final var restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null);
 
         final Configuration config;
         if (orgConfig == null) {
@@ -69,23 +67,12 @@ public final class DistributedDataStoreFactory {
         } else {
             config = orgConfig;
         }
-        final ClusterWrapper clusterWrapper = new ClusterWrapperImpl(actorSystem);
-        final DatastoreContextFactory contextFactory = introspector.newContextFactory();
+        final var clusterWrapper = new ClusterWrapperImpl(actorSystem);
+        final var contextFactory = introspector.newContextFactory();
 
-        // This is the potentially-updated datastore context, distinct from the initial one
-        final DatastoreContext datastoreContext = contextFactory.getBaseDatastoreContext();
-
-        final AbstractDataStore dataStore;
-        if (datastoreContext.isUseTellBasedProtocol()) {
-            dataStore = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
-                restoreFromSnapshot);
-            LOG.info("Data store {} is using tell-based protocol", datastoreName);
-        } else {
-            dataStore = new DistributedDataStore(actorSystem, clusterWrapper, config, contextFactory,
-                restoreFromSnapshot);
-            LOG.info("Data store {} is using ask-based protocol", datastoreName);
-        }
-
-        return dataStore;
+        final var ret = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
+            restoreFromSnapshot);
+        LOG.info("Data store {} is using tell-based protocol", datastoreName);
+        return ret;
     }
 }
index deae01960b12da93ecbade4b82f411a3696a68a5..5f4e30978ef8952a4813ca6f17574af0abc7ccd6 100644 (file)
@@ -11,7 +11,7 @@ import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
@@ -24,7 +24,6 @@ public interface DistributedDataStoreInterface extends DOMStore {
     ActorUtils getActorUtils();
 
     @Beta
-    <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
-            YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard,
+    Registration registerProxyListener(YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard,
             DOMDataTreeChangeListener delegate);
 }
index 82a30b6c4050e59c02e6c873717f11023e51eebb..dca9c0773e06f4ee02c52ace1e9616c346ecb04d 100644 (file)
@@ -11,12 +11,12 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import java.util.Collection;
+import java.util.List;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,7 +37,7 @@ final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListene
     }
 
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
         LOG.debug("Sending DataTreeChanged to {}", actor);
         actor.tell(new DataTreeChanged(changes), sendingActor);
     }
index 7f281ab0f34eee7240a06ea1ac7f23b40dccaa73..c89627800fc72ef8a275ca9b65cfdc19c08e1e85 100644 (file)
@@ -7,11 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import static com.google.common.base.Verify.verify;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.base.VerifyException;
 import com.google.common.collect.Collections2;
 import com.google.common.collect.ImmutableList;
 import java.util.HashMap;
@@ -21,27 +21,23 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
 import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
-import org.opendaylight.yangtools.concepts.Builder;
-import org.opendaylight.yangtools.concepts.Identifiable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * This class is NOT thread-safe.
  */
-abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMetadata>,
-        Identifiable<ClientIdentifier> {
+abstract sealed class FrontendClientMetadataBuilder {
     static final class Disabled extends FrontendClientMetadataBuilder {
-        Disabled(final String shardName, final ClientIdentifier identifier) {
-            super(shardName, identifier);
+        Disabled(final String shardName, final ClientIdentifier clientId) {
+            super(shardName, clientId);
         }
 
         @Override
-        public FrontendClientMetadata build() {
-            return new FrontendClientMetadata(getIdentifier(), ImmutableUnsignedLongSet.of(), ImmutableList.of());
+        FrontendClientMetadata build() {
+            return new FrontendClientMetadata(clientId(), ImmutableUnsignedLongSet.of(), ImmutableList.of());
         }
 
         @Override
@@ -81,7 +77,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         LeaderFrontendState toLeaderState(final Shard shard) {
-            return new LeaderFrontendState.Disabled(shard.persistenceId(), getIdentifier(), shard.getDataStore());
+            return new LeaderFrontendState.Disabled(shard.persistenceId(), clientId(), shard.getDataStore());
         }
     }
 
@@ -90,8 +86,8 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
         private final MutableUnsignedLongSet purgedHistories;
         private final LocalHistoryIdentifier standaloneId;
 
-        Enabled(final String shardName, final ClientIdentifier identifier) {
-            super(shardName, identifier);
+        Enabled(final String shardName, final ClientIdentifier clientId) {
+            super(shardName, clientId);
 
             purgedHistories = MutableUnsignedLongSet.of();
 
@@ -101,33 +97,33 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
         }
 
         Enabled(final String shardName, final FrontendClientMetadata meta) {
-            super(shardName, meta.getIdentifier());
+            super(shardName, meta.clientId());
 
             purgedHistories = meta.getPurgedHistories().mutableCopy();
-            for (FrontendHistoryMetadata h : meta.getCurrentHistories()) {
-                final FrontendHistoryMetadataBuilder b = new FrontendHistoryMetadataBuilder(getIdentifier(), h);
-                currentHistories.put(b.getIdentifier(), b);
+            for (var historyMeta : meta.getCurrentHistories()) {
+                final var builder = new FrontendHistoryMetadataBuilder(clientId(), historyMeta);
+                currentHistories.put(builder.getIdentifier(), builder);
             }
 
             // Sanity check and recovery
             standaloneId = standaloneHistoryId();
             if (!currentHistories.containsKey(standaloneId)) {
                 LOG.warn("{}: Client {} recovered histories {} do not contain stand-alone history, attempting recovery",
-                    shardName, getIdentifier(), currentHistories);
+                    shardName, clientId(), currentHistories);
                 currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
             }
         }
 
         @Override
-        public FrontendClientMetadata build() {
-            return new FrontendClientMetadata(getIdentifier(), purgedHistories.immutableCopy(),
+        FrontendClientMetadata build() {
+            return new FrontendClientMetadata(clientId(), purgedHistories.immutableCopy(),
                 Collections2.transform(currentHistories.values(), FrontendHistoryMetadataBuilder::build));
         }
 
         @Override
         void onHistoryCreated(final LocalHistoryIdentifier historyId) {
-            final FrontendHistoryMetadataBuilder newMeta = new FrontendHistoryMetadataBuilder(historyId);
-            final FrontendHistoryMetadataBuilder oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
+            final var newMeta = new FrontendHistoryMetadataBuilder(historyId);
+            final var oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
             if (oldMeta != null) {
                 // This should not be happening, warn about it
                 LOG.warn("{}: Reused local history {}", shardName(), historyId);
@@ -138,7 +134,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         void onHistoryClosed(final LocalHistoryIdentifier historyId) {
-            final FrontendHistoryMetadataBuilder builder = currentHistories.get(historyId);
+            final var builder = currentHistories.get(historyId);
             if (builder != null) {
                 builder.onHistoryClosed();
                 LOG.debug("{}: Closed history {}", shardName(), historyId);
@@ -149,7 +145,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         void onHistoryPurged(final LocalHistoryIdentifier historyId) {
-            final FrontendHistoryMetadataBuilder history = currentHistories.remove(historyId);
+            final var history = currentHistories.remove(historyId);
             final long historyBits = historyId.getHistoryId();
             if (history == null) {
                 if (!purgedHistories.contains(historyBits)) {
@@ -166,7 +162,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         void onTransactionAborted(final TransactionIdentifier txId) {
-            final FrontendHistoryMetadataBuilder history = getHistory(txId);
+            final var history = getHistory(txId);
             if (history != null) {
                 history.onTransactionAborted(txId);
                 LOG.debug("{}: Aborted transaction {}", shardName(), txId);
@@ -177,7 +173,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         void onTransactionCommitted(final TransactionIdentifier txId) {
-            final FrontendHistoryMetadataBuilder history = getHistory(txId);
+            final var history = getHistory(txId);
             if (history != null) {
                 history.onTransactionCommitted(txId);
                 LOG.debug("{}: Committed transaction {}", shardName(), txId);
@@ -188,7 +184,7 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
 
         @Override
         void onTransactionPurged(final TransactionIdentifier txId) {
-            final FrontendHistoryMetadataBuilder history = getHistory(txId);
+            final var history = getHistory(txId);
             if (history != null) {
                 history.onTransactionPurged(txId);
                 LOG.debug("{}: Purged transaction {}", shardName(), txId);
@@ -212,26 +208,29 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
         LeaderFrontendState toLeaderState(final Shard shard) {
             // Note: we have to make sure to *copy* all current state and not leak any views, otherwise leader/follower
             //       interactions would get intertwined leading to inconsistencies.
-            final Map<LocalHistoryIdentifier, LocalFrontendHistory> histories = new HashMap<>();
-            for (FrontendHistoryMetadataBuilder e : currentHistories.values()) {
-                if (e.getIdentifier().getHistoryId() != 0) {
-                    final AbstractFrontendHistory state = e.toLeaderState(shard);
-                    verify(state instanceof LocalFrontendHistory, "Unexpected state %s", state);
-                    histories.put(e.getIdentifier(), (LocalFrontendHistory) state);
+            final var histories = new HashMap<LocalHistoryIdentifier, LocalFrontendHistory>();
+            for (var historyMetaBuilder : currentHistories.values()) {
+                final var historyId = historyMetaBuilder.getIdentifier();
+                if (historyId.getHistoryId() != 0) {
+                    final var state = historyMetaBuilder.toLeaderState(shard);
+                    if (state instanceof LocalFrontendHistory localState) {
+                        histories.put(historyId, localState);
+                    } else {
+                        throw new VerifyException("Unexpected state " + state);
+                    }
                 }
             }
 
             final AbstractFrontendHistory singleHistory;
-            final FrontendHistoryMetadataBuilder singleHistoryMeta = currentHistories.get(
-                new LocalHistoryIdentifier(getIdentifier(), 0));
+            final var singleHistoryMeta = currentHistories.get(new LocalHistoryIdentifier(clientId(), 0));
             if (singleHistoryMeta == null) {
-                final ShardDataTree tree = shard.getDataStore();
-                singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), getIdentifier(), tree);
+                final var tree = shard.getDataStore();
+                singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), clientId(), tree);
             } else {
                 singleHistory = singleHistoryMeta.toLeaderState(shard);
             }
 
-            return new LeaderFrontendState.Enabled(shard.persistenceId(), getIdentifier(), shard.getDataStore(),
+            return new LeaderFrontendState.Enabled(shard.persistenceId(), clientId(), shard.getDataStore(),
                 purgedHistories.mutableCopy(), singleHistory, histories);
         }
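
The leader-state conversion above swaps Guava's verify-then-cast for a pattern-matching instanceof that raises VerifyException itself. A self-contained sketch of that idiom with illustrative types (only VerifyException is the real Guava class):

    // Replace verify(x instanceof T) + unchecked cast with a single pattern match
    // that binds the narrowed variable and fails loudly otherwise.
    static Number requireNumber(final Object value) {
        if (value instanceof Number number) {
            return number;
        }
        throw new VerifyException("Unexpected value " + value);
    }
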
 
@@ -259,36 +258,37 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
         }
 
         private LocalHistoryIdentifier standaloneHistoryId() {
-            return new LocalHistoryIdentifier(getIdentifier(), 0);
+            return new LocalHistoryIdentifier(clientId(), 0);
         }
     }
 
     private static final Logger LOG = LoggerFactory.getLogger(FrontendClientMetadataBuilder.class);
 
-    private final @NonNull ClientIdentifier identifier;
+    private final @NonNull ClientIdentifier clientId;
     private final @NonNull String shardName;
 
-    FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier identifier) {
+    FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier clientId) {
         this.shardName = requireNonNull(shardName);
-        this.identifier = requireNonNull(identifier);
+        this.clientId = requireNonNull(clientId);
     }
 
     static FrontendClientMetadataBuilder of(final String shardName, final FrontendClientMetadata meta) {
         // Completely empty histories imply disabled state, as otherwise we'd have a record of the single history --
         // either purged or active
         return meta.getCurrentHistories().isEmpty() && meta.getPurgedHistories().isEmpty()
-            ? new Disabled(shardName, meta.getIdentifier()) : new Enabled(shardName, meta);
+            ? new Disabled(shardName, meta.clientId()) : new Enabled(shardName, meta);
     }
 
-    @Override
-    public final ClientIdentifier getIdentifier() {
-        return identifier;
+    final ClientIdentifier clientId() {
+        return clientId;
     }
 
     final String shardName() {
         return shardName;
     }
 
+    abstract FrontendClientMetadata build();
+
     abstract void onHistoryCreated(LocalHistoryIdentifier historyId);
 
     abstract void onHistoryClosed(LocalHistoryIdentifier historyId);
@@ -317,6 +317,6 @@ abstract class FrontendClientMetadataBuilder implements Builder<FrontendClientMe
     }
 
     ToStringHelper addToStringAttributes(final ToStringHelper helper) {
-        return helper.add("identifier", identifier);
+        return helper.add("clientId", clientId);
     }
 }
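
FrontendClientMetadataBuilder is now a sealed class whose only permitted subclasses are the nested Disabled and Enabled implementations; because they are declared in the same source file, no permits clause is required. A minimal standalone sketch of that shape, with illustrative names:

    // Sealed hierarchy confined to one file: permitted subclasses are inferred.
    abstract sealed class MetadataBuilder {
        static final class Disabled extends MetadataBuilder {
            @Override Object build() { return "empty metadata"; }
        }

        static final class Enabled extends MetadataBuilder {
            @Override Object build() { return "full metadata"; }
        }

        abstract Object build();
    }
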
index 8a1efbbb4376fadfb8d0824a12b5054ef1449b49..f869e7ac5ac679658ca146ab38685ebf91d65947 100644 (file)
@@ -21,12 +21,9 @@ import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMe
 import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
-import org.opendaylight.yangtools.concepts.Builder;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
-final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMetadata>,
-        Identifiable<LocalHistoryIdentifier> {
-
+final class FrontendHistoryMetadataBuilder implements Identifiable<LocalHistoryIdentifier> {
     private final @NonNull Map<UnsignedLong, Boolean> closedTransactions;
     private final @NonNull MutableUnsignedLongSet purgedTransactions;
     private final @NonNull LocalHistoryIdentifier identifier;
@@ -51,7 +48,6 @@ final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMet
         return identifier;
     }
 
-    @Override
     public FrontendHistoryMetadata build() {
         return new FrontendHistoryMetadata(identifier.getHistoryId(), identifier.getCookie(), closed,
             UnsignedLongBitmap.copyOf(closedTransactions), purgedTransactions.immutableCopy());
index e3a18997e43e68fc3597936e99a4a58890d73403..abb97e59a43701df5f223e526bc8a3596e8c044f 100644 (file)
@@ -20,7 +20,6 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
 import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.slf4j.Logger;
@@ -59,13 +58,13 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
         LOG.debug("{}: applying snapshot {} over clients {}", shardName, snapshot, clients);
         clients.clear();
 
-        for (FrontendClientMetadata m : snapshot.getClients()) {
-            LOG.debug("{}: applying metadata {}", shardName, m);
-            final FrontendClientMetadataBuilder b = FrontendClientMetadataBuilder.of(shardName, m);
-            final FrontendIdentifier client = m.getIdentifier().getFrontendId();
+        for (var clientMeta : snapshot.getClients()) {
+            LOG.debug("{}: applying metadata {}", shardName, clientMeta);
+            final var builder = FrontendClientMetadataBuilder.of(shardName, clientMeta);
+            final var frontendId = clientMeta.clientId().getFrontendId();
 
-            LOG.debug("{}: client {} updated to {}", shardName, client, b);
-            clients.put(client, b);
+            LOG.debug("{}: client {} updated to {}", shardName, frontendId, builder);
+            clients.put(frontendId, builder);
         }
     }
 
@@ -76,13 +75,13 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
     }
 
     private FrontendClientMetadataBuilder ensureClient(final ClientIdentifier id) {
-        final FrontendClientMetadataBuilder existing = clients.get(id.getFrontendId());
-        if (existing != null && id.equals(existing.getIdentifier())) {
+        final var existing = clients.get(id.getFrontendId());
+        if (existing != null && id.equals(existing.clientId())) {
             return existing;
         }
 
-        final FrontendClientMetadataBuilder client = new FrontendClientMetadataBuilder.Enabled(shardName, id);
-        final FrontendClientMetadataBuilder previous = clients.put(id.getFrontendId(), client);
+        final var client = new FrontendClientMetadataBuilder.Enabled(shardName, id);
+        final var previous = clients.put(id.getFrontendId(), client);
         if (previous != null) {
             LOG.debug("{}: Replaced client {} with {}", shardName, previous, client);
         } else {
@@ -136,8 +135,8 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
     }
 
     void disableTracking(final ClientIdentifier clientId) {
-        final FrontendIdentifier frontendId = clientId.getFrontendId();
-        final FrontendClientMetadataBuilder client = clients.get(frontendId);
+        final var frontendId = clientId.getFrontendId();
+        final var client = clients.get(frontendId);
         if (client == null) {
             // When we have not seen the client before, we still need to disable tracking for him since this only gets
             // triggered once.
@@ -145,7 +144,7 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
             clients.put(frontendId, new FrontendClientMetadataBuilder.Disabled(shardName, clientId));
             return;
         }
-        if (!clientId.equals(client.getIdentifier())) {
+        if (!clientId.equals(client.clientId())) {
             LOG.debug("{}: disableTracking {} does not match client {}, ignoring", shardName, clientId, client);
             return;
         }
@@ -159,7 +158,7 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
 
     ImmutableSet<ClientIdentifier> getClients() {
         return clients.values().stream()
-                .map(FrontendClientMetadataBuilder::getIdentifier)
-                .collect(ImmutableSet.toImmutableSet());
+            .map(FrontendClientMetadataBuilder::clientId)
+            .collect(ImmutableSet.toImmutableSet());
     }
 }
index e8b99e60a540288b77bbcba5fecc0e1965f1733c..e1b8a3fb94a8aed70bdc44c41d0f592a70227d77 100644 (file)
@@ -40,7 +40,7 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction {
     private FrontendReadOnlyTransaction(final AbstractFrontendHistory history,
             final ReadOnlyShardDataTreeTransaction transaction) {
         super(history, transaction.getIdentifier());
-        this.openTransaction = requireNonNull(transaction);
+        openTransaction = requireNonNull(transaction);
     }
 
     static FrontendReadOnlyTransaction create(final AbstractFrontendHistory history,
@@ -75,7 +75,8 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction {
         // The only valid request here is with abort protocol
         final Optional<PersistenceProtocol> optProto = request.getPersistenceProtocol();
         checkArgument(optProto.isPresent(), "Commit protocol is missing in %s", request);
-        checkArgument(optProto.get() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s", request);
+        checkArgument(optProto.orElseThrow() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s",
+            request);
         openTransaction.abort(() -> recordAndSendSuccess(envelope, now,
             new ModifyTransactionSuccess(request.getTarget(), request.getSequence())));
     }
index 4bd2a5725758b228c1803414b74e116810ca40b9..c626791547aafea037bb806d2ec1200b8ea0eb06 100644 (file)
@@ -42,9 +42,10 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -100,7 +101,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
         Ready(final ShardDataTreeCohort readyCohort) {
             this.readyCohort = requireNonNull(readyCohort);
-            this.stage = CommitStage.READY;
+            stage = CommitStage.READY;
         }
 
         @Override
@@ -163,13 +164,13 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
     private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
             final ReadWriteShardDataTreeTransaction transaction) {
         super(history, id);
-        this.state = new Open(transaction);
+        state = new Open(transaction);
     }
 
     private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
             final DataTreeModification mod) {
         super(history, id);
-        this.state = new Sealed(mod);
+        state = new Sealed(mod);
     }
 
     static FrontendReadWriteTransaction createOpen(final AbstractFrontendHistory history,
@@ -186,20 +187,20 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
     @Override
     TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
             final long now) throws RequestException {
-        if (request instanceof ModifyTransactionRequest) {
-            return handleModifyTransaction((ModifyTransactionRequest) request, envelope, now);
-        } else if (request instanceof CommitLocalTransactionRequest) {
-            handleCommitLocalTransaction((CommitLocalTransactionRequest) request, envelope, now);
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            return handleModifyTransaction(modifyRequest, envelope, now);
+        } else if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
+            handleCommitLocalTransaction(commitLocalRequest, envelope, now);
             return null;
-        } else if (request instanceof ExistsTransactionRequest) {
-            return handleExistsTransaction((ExistsTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
-            return handleReadTransaction((ReadTransactionRequest) request);
-        } else if (request instanceof TransactionPreCommitRequest) {
-            handleTransactionPreCommit((TransactionPreCommitRequest) request, envelope, now);
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
+            return handleExistsTransaction(existsRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
+            return handleReadTransaction(readRequest);
+        } else if (request instanceof TransactionPreCommitRequest preCommitRequest) {
+            handleTransactionPreCommit(preCommitRequest, envelope, now);
             return null;
-        } else if (request instanceof TransactionDoCommitRequest) {
-            handleTransactionDoCommit((TransactionDoCommitRequest) request, envelope, now);
+        } else if (request instanceof TransactionDoCommitRequest doCommitRequest) {
+            handleTransactionDoCommit(doCommitRequest, envelope, now);
             return null;
         } else if (request instanceof TransactionAbortRequest) {
             return handleTransactionAbort(request.getSequence(), envelope, now);
@@ -349,9 +350,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
         final Ready ready = checkReady();
         startAbort();
-        ready.readyCohort.abort(new FutureCallback<Void>() {
+        ready.readyCohort.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final Empty result) {
                 recordAndSendSuccess(envelope, now, new TransactionAbortSuccess(getIdentifier(), sequence));
                 finishAbort();
             }
@@ -377,9 +378,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             case READY:
                 ready.stage = CommitStage.CAN_COMMIT_PENDING;
                 LOG.debug("{}: Transaction {} initiating canCommit", persistenceId(), getIdentifier());
-                checkReady().readyCohort.canCommit(new FutureCallback<Void>() {
+                checkReady().readyCohort.canCommit(new FutureCallback<>() {
                     @Override
-                    public void onSuccess(final Void result) {
+                    public void onSuccess(final Empty result) {
                         successfulCanCommit(envelope, now);
                     }
 
@@ -429,9 +430,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             case READY:
                 ready.stage = CommitStage.CAN_COMMIT_PENDING;
                 LOG.debug("{}: Transaction {} initiating direct canCommit", persistenceId(), getIdentifier());
-                ready.readyCohort.canCommit(new FutureCallback<Void>() {
+                ready.readyCohort.canCommit(new FutureCallback<>() {
                     @Override
-                    public void onSuccess(final Void result) {
+                    public void onSuccess(final Empty result) {
                         successfulDirectCanCommit(envelope, now);
                     }
 
@@ -511,7 +512,8 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
         final Optional<Exception> optFailure = request.getDelayedFailure();
         if (optFailure.isPresent()) {
-            state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification, optFailure.get()));
+            state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification,
+                optFailure.orElseThrow()));
         } else {
             state = new Ready(history().createReadyCohort(getIdentifier(), sealedModification, Optional.empty()));
         }
@@ -545,10 +547,10 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             for (TransactionModification m : modifications) {
                 if (m instanceof TransactionDelete) {
                     modification.delete(m.getPath());
-                } else if (m instanceof TransactionWrite) {
-                    modification.write(m.getPath(), ((TransactionWrite) m).getData());
-                } else if (m instanceof TransactionMerge) {
-                    modification.merge(m.getPath(), ((TransactionMerge) m).getData());
+                } else if (m instanceof TransactionWrite write) {
+                    modification.write(m.getPath(), write.getData());
+                } else if (m instanceof TransactionMerge merge) {
+                    modification.merge(m.getPath(), merge.getData());
                 } else {
                     LOG.warn("{}: ignoring unhandled modification {}", persistenceId(), m);
                 }
@@ -566,7 +568,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             return replyModifySuccess(request.getSequence());
         }
 
-        switch (maybeProto.get()) {
+        switch (maybeProto.orElseThrow()) {
             case ABORT:
                 if (ABORTING.equals(state)) {
                     LOG.debug("{}: Transaction {} already aborting", persistenceId(), getIdentifier());
@@ -592,7 +594,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
                 coordinatedCommit(envelope, now);
                 return null;
             default:
-                LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.get());
+                LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.orElseThrow());
                 throw new UnsupportedRequestException(request);
         }
     }
index e086e51a66f706d103168bbe36db50841e3c68eb..916cb75f5acd9d6c75c3372671cfad28ab5d1865 100644 (file)
@@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
@@ -40,10 +39,8 @@ import org.slf4j.LoggerFactory;
 /**
  * Frontend state as observed by the shard leader. This class is responsible for tracking generations and sequencing
  * in the frontend/backend conversation. This class is NOT thread-safe.
- *
- * @author Robert Varga
  */
-abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
+abstract sealed class LeaderFrontendState implements Identifiable<ClientIdentifier> {
     static final class Disabled extends LeaderFrontendState {
         Disabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
             super(persistenceId, clientId, tree);
@@ -95,12 +92,12 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
             checkRequestSequence(envelope);
 
             try {
-                if (request instanceof CreateLocalHistoryRequest) {
-                    return handleCreateHistory((CreateLocalHistoryRequest) request, envelope, now);
-                } else if (request instanceof DestroyLocalHistoryRequest) {
-                    return handleDestroyHistory((DestroyLocalHistoryRequest) request, envelope, now);
-                } else if (request instanceof PurgeLocalHistoryRequest) {
-                    return handlePurgeHistory((PurgeLocalHistoryRequest) request, envelope, now);
+                if (request instanceof CreateLocalHistoryRequest req) {
+                    return handleCreateHistory(req, envelope, now);
+                } else if (request instanceof DestroyLocalHistoryRequest req) {
+                    return handleDestroyHistory(req, envelope, now);
+                } else if (request instanceof PurgeLocalHistoryRequest req) {
+                    return handlePurgeHistory(req, envelope, now);
                 } else {
                     LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
                     throw new UnsupportedRequestException(request);
@@ -116,7 +113,7 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
             checkRequestSequence(envelope);
 
             try {
-                final LocalHistoryIdentifier lhId = request.getTarget().getHistoryId();
+                final var lhId = request.getTarget().getHistoryId();
                 final AbstractFrontendHistory history;
 
                 if (lhId.getHistoryId() != 0) {
@@ -163,8 +160,8 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
 
         private LocalHistorySuccess handleCreateHistory(final CreateLocalHistoryRequest request,
                 final RequestEnvelope envelope, final long now) throws RequestException {
-            final LocalHistoryIdentifier historyId = request.getTarget();
-            final AbstractFrontendHistory existing = localHistories.get(historyId);
+            final var historyId = request.getTarget();
+            final var existing = localHistories.get(historyId);
             if (existing != null) {
                 // History already exists: report success
                 LOG.debug("{}: history {} already exists", persistenceId(), historyId);
@@ -184,7 +181,7 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
             }
 
             // We have to send the response only after persistence has completed
-            final ShardDataTreeTransactionChain chain = tree().ensureTransactionChain(historyId, () -> {
+            final var chain = tree().ensureTransactionChain(historyId, () -> {
                 LOG.debug("{}: persisted history {}", persistenceId(), historyId);
                 envelope.sendSuccess(new LocalHistorySuccess(historyId, request.getSequence()),
                     tree().readTime() - now);
@@ -197,8 +194,8 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
 
         private LocalHistorySuccess handleDestroyHistory(final DestroyLocalHistoryRequest request,
                 final RequestEnvelope envelope, final long now) {
-            final LocalHistoryIdentifier id = request.getTarget();
-            final LocalFrontendHistory existing = localHistories.get(id);
+            final var id = request.getTarget();
+            final var existing = localHistories.get(id);
             if (existing == null) {
                 // History does not exist: report success
                 LOG.debug("{}: history {} does not exist, nothing to destroy", persistenceId(), id);
@@ -211,8 +208,8 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
 
         private LocalHistorySuccess handlePurgeHistory(final PurgeLocalHistoryRequest request,
                 final RequestEnvelope envelope, final long now) {
-            final LocalHistoryIdentifier id = request.getTarget();
-            final LocalFrontendHistory existing = localHistories.remove(id);
+            final var id = request.getTarget();
+            final var existing = localHistories.remove(id);
             if (existing == null) {
                 LOG.debug("{}: history {} has already been purged", persistenceId(), id);
                 return new LocalHistorySuccess(id, request.getSequence());
@@ -297,16 +294,16 @@ abstract class LeaderFrontendState implements Identifiable<ClientIdentifier> {
 
     void retire() {
         // Hunt down any transactions associated with this frontend
-        final Iterator<SimpleShardDataTreeCohort> it = tree.cohortIterator();
+        final var it = tree.cohortIterator();
         while (it.hasNext()) {
-            final SimpleShardDataTreeCohort cohort = it.next();
-            if (clientId.equals(cohort.getIdentifier().getHistoryId().getClientId())) {
+            final var cohort = it.next();
+            final var transactionId = cohort.transactionId();
+            if (clientId.equals(transactionId.getHistoryId().getClientId())) {
                 if (cohort.getState() != State.COMMIT_PENDING) {
-                    LOG.debug("{}: Retiring transaction {}", persistenceId, cohort.getIdentifier());
+                    LOG.debug("{}: Retiring transaction {}", persistenceId, transactionId);
                     it.remove();
                 } else {
-                    LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId,
-                        cohort.getIdentifier());
+                    LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId, transactionId);
                 }
             }
         }
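
The hunk above also seals the class hierarchy, so the Enabled and Disabled variants declared in the same file become the only possible frontend states. A minimal sketch of that pattern, with illustrative names rather than the real fields:

    // Sealed hierarchy: subclasses nested in the same file need no "permits" clause.
    abstract sealed class FrontendState {
        static final class Enabled extends FrontendState {
            // tracks histories and transactions while the client is actively served
        }

        static final class Disabled extends FrontendState {
            // placeholder installed once tracking has been switched off for a client
        }
    }
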
index 3125ed651a5c6cbe64a511e83a2348beb4b3cb14..8226ac3c758cb36fbcb4ef7f6e943c065bbb80f5 100644 (file)
@@ -18,7 +18,7 @@ import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Chained transaction specialization of {@link AbstractFrontendHistory}. It prevents concurrent open transactions.
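
The class comment above states the chain's contract: only one transaction may be open at a time. A tiny sketch of that rule, using illustrative names only:

    // Reject opening a new transaction while the previous one is still open.
    final class SingleOpenChain {
        private boolean open;

        synchronized void openTransaction() {
            if (open) {
                throw new IllegalStateException("previous transaction still open");
            }
            open = true;
        }

        synchronized void closeTransaction() {
            open = false;
        }
    }
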
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index ac279b7..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Fake {@link DOMStoreThreePhaseCommitCohort} instantiated for local transactions to conform with the DOM
- * transaction APIs. It is only used to hold the data from a local DOM transaction ready operation and to
- * initiate direct or coordinated commits from the front-end by sending the ReadyLocalTransaction message.
- * It is not actually called by the front-end to perform 3PC thus the canCommit/preCommit/commit methods
- * are no-ops.
- */
-class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
-    private static final Logger LOG = LoggerFactory.getLogger(LocalThreePhaseCommitCohort.class);
-
-    private final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction;
-    private final DataTreeModification modification;
-    private final ActorUtils actorUtils;
-    private final ActorSelection leader;
-    private final Exception operationError;
-
-    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-            final DataTreeModification modification,
-            final Exception operationError) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.leader = requireNonNull(leader);
-        this.transaction = requireNonNull(transaction);
-        this.modification = requireNonNull(modification);
-        this.operationError = operationError;
-    }
-
-    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction, final Exception operationError) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.leader = requireNonNull(leader);
-        this.transaction = requireNonNull(transaction);
-        this.operationError = requireNonNull(operationError);
-        this.modification = null;
-    }
-
-    private Future<Object> initiateCommit(final boolean immediate,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        if (operationError != null) {
-            return Futures.failed(operationError);
-        }
-
-        final ReadyLocalTransaction message = new ReadyLocalTransaction(transaction.getIdentifier(),
-                modification, immediate, participatingShardNames);
-        return actorUtils.executeOperationAsync(leader, message, actorUtils.getTransactionCommitOperationTimeout());
-    }
-
-    Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
-        final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
-        final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorUtils,
-                transaction.getIdentifier());
-        ret.onComplete(new OnComplete<ActorSelection>() {
-            @Override
-            public void onComplete(final Throwable failure, final ActorSelection success) {
-                if (failure != null) {
-                    LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
-                    transactionAborted(transaction);
-                    return;
-                }
-
-                LOG.debug("Transaction {} resolved to actor {}", transaction.getIdentifier(), success);
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return ret;
-    }
-
-    Future<Object> initiateDirectCommit() {
-        final Future<Object> messageFuture = initiateCommit(true, Optional.empty());
-        messageFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object message) {
-                if (failure != null) {
-                    LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
-                    transactionAborted(transaction);
-                } else if (CommitTransactionReply.isSerializedType(message)) {
-                    LOG.debug("Transaction {} committed successfully", transaction.getIdentifier());
-                    transactionCommitted(transaction);
-                } else {
-                    LOG.error("Transaction {} resulted in unhandled message type {}, aborting",
-                        transaction.getIdentifier(), message.getClass());
-                    transactionAborted(transaction);
-                }
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return messageFuture;
-    }
-
-    @Override
-    public final ListenableFuture<Boolean> canCommit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> preCommit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> abort() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> commit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
-    }
-
-    protected void transactionCommitted(final SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
-    }
-}
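
The removed class documented above never runs the three-phase commit itself: it only carries the readied modification to the shard leader and reports the asynchronous outcome. A minimal sketch of that shape, using hypothetical names and plain CompletableFuture instead of the Akka and Guava futures of the real code:

    import java.util.concurrent.CompletableFuture;

    // Hypothetical stand-in for the forwarding cohort described above.
    final class ForwardingCohort {
        private final Exception operationError;          // failure recorded while readying
        private final CompletableFuture<Void> backend;   // completes when the leader commits

        ForwardingCohort(final Exception operationError, final CompletableFuture<Void> backend) {
            this.operationError = operationError;
            this.backend = backend;
        }

        CompletableFuture<Void> initiateDirectCommit() {
            // Fail fast if the transaction already failed while being readied,
            // otherwise just surface the leader's reply.
            return operationError != null
                ? CompletableFuture.failedFuture(operationError)
                : backend;
        }

        void canCommit() {
            // The front-end never drives 3PC through this cohort.
            throw new UnsupportedOperationException();
        }
    }
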
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java
deleted file mode 100644 (file)
index c995e11..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.AbstractSnapshotBackedTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-
-/**
- * Transaction chain instantiated on top of a locally-available DataTree. It does not instantiate
- * a transaction in the leader and rather chains transactions on top of themselves.
- */
-final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain<TransactionIdentifier>
-        implements LocalTransactionFactory {
-    private static final Throwable ABORTED = new Throwable("Transaction aborted");
-    private final TransactionChainProxy parent;
-    private final ActorSelection leader;
-    private final ReadOnlyDataTree tree;
-
-    LocalTransactionChain(final TransactionChainProxy parent, final ActorSelection leader,
-            final ReadOnlyDataTree tree) {
-        this.parent = requireNonNull(parent);
-        this.leader = requireNonNull(leader);
-        this.tree = requireNonNull(tree);
-    }
-
-    ReadOnlyDataTree getDataTree() {
-        return tree;
-    }
-
-    @Override
-    protected TransactionIdentifier nextTransactionIdentifier() {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    protected boolean getDebugTransactions() {
-        return false;
-    }
-
-    @Override
-    protected DataTreeSnapshot takeSnapshot() {
-        return tree.takeSnapshot();
-    }
-
-    @Override
-    protected DOMStoreThreePhaseCommitCohort createCohort(
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-            final DataTreeModification modification,
-            final Exception operationError) {
-        return new LocalChainThreePhaseCommitCohort(transaction, modification, operationError);
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
-        return super.newReadOnlyTransaction(identifier);
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
-        return super.newReadWriteTransaction(identifier);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
-        return super.newWriteOnlyTransaction(identifier);
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
-    @Override
-    public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) {
-        checkArgument(tx instanceof SnapshotBackedWriteTransaction);
-        if (operationError != null) {
-            return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx,
-                    operationError);
-        }
-
-        try {
-            return (LocalThreePhaseCommitCohort) tx.ready();
-        } catch (Exception e) {
-            // Unfortunately we need to cast to SnapshotBackedWriteTransaction here as it's required by
-            // LocalThreePhaseCommitCohort and the base class.
-            return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, e);
-        }
-    }
-
-    private class LocalChainThreePhaseCommitCohort extends LocalThreePhaseCommitCohort {
-
-        protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-                DataTreeModification modification, Exception operationError) {
-            super(parent.getActorUtils(), leader, transaction, modification, operationError);
-        }
-
-        protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-                Exception operationError) {
-            super(parent.getActorUtils(), leader, transaction, operationError);
-        }
-
-        @Override
-        protected void transactionAborted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
-            onTransactionFailed(transaction, ABORTED);
-        }
-
-        @Override
-        protected void transactionCommitted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
-            onTransactionCommited(transaction);
-        }
-    }
-}
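
The comment at the top of the removed file describes chaining: instead of asking the leader for a transaction, each new transaction is layered on the snapshot produced by the previous one. A small sketch of the idea, with a plain Map standing in for the data tree (illustrative only):

    import java.util.HashMap;
    import java.util.Map;

    // Each transaction starts from the state left behind by the previous commit.
    final class LocalChainSketch {
        private Map<String, String> latest = new HashMap<>();

        Map<String, String> newTransaction() {
            return new HashMap<>(latest);   // work on a copy of the latest state
        }

        void commit(final Map<String, String> txState) {
            latest = txState;               // becomes the base for the next transaction
        }
    }
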
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java
deleted file mode 100644 (file)
index 6b30069..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.function.Consumer;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-/**
- * Processes front-end transaction operations locally before being committed to the destination shard.
- * Instances of this class are used when the destination shard is local to the caller.
- *
- * @author Thomas Pantelis
- */
-abstract class LocalTransactionContext extends TransactionContext {
-    private final DOMStoreTransaction txDelegate;
-    private final LocalTransactionReadySupport readySupport;
-    private Exception operationError;
-
-    LocalTransactionContext(final DOMStoreTransaction txDelegate, final TransactionIdentifier identifier,
-            final LocalTransactionReadySupport readySupport) {
-        super(identifier);
-        this.txDelegate = requireNonNull(txDelegate);
-        this.readySupport = readySupport;
-    }
-
-    abstract DOMStoreWriteTransaction getWriteDelegate();
-
-    abstract DOMStoreReadTransaction getReadDelegate();
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void executeModification(final Consumer<DOMStoreWriteTransaction> consumer) {
-        incrementModificationCount();
-        if (operationError == null) {
-            try {
-                consumer.accept(getWriteDelegate());
-            } catch (Exception e) {
-                operationError = e;
-            }
-        }
-    }
-
-    @Override
-    void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
-        executeModification(transaction -> transaction.delete(path));
-    }
-
-    @Override
-    void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        executeModification(transaction -> transaction.merge(path, data));
-    }
-
-    @Override
-    void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        executeModification(transaction -> transaction.write(path, data));
-    }
-
-    @Override
-    <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture,
-            final Boolean havePermit) {
-        Futures.addCallback(readCmd.apply(getReadDelegate()), new FutureCallback<T>() {
-            @Override
-            public void onSuccess(final T result) {
-                proxyFuture.set(result);
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                proxyFuture.setException(failure instanceof Exception
-                        ? ReadFailedException.MAPPER.apply((Exception) failure) : failure);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    @Override
-    Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        final LocalThreePhaseCommitCohort cohort = ready();
-        return cohort.initiateCoordinatedCommit(participatingShardNames);
-    }
-
-    @Override
-    Future<Object> directCommit(final Boolean havePermit) {
-        final LocalThreePhaseCommitCohort cohort = ready();
-        return cohort.initiateDirectCommit();
-    }
-
-    @Override
-    void closeTransaction() {
-        txDelegate.close();
-    }
-
-    private LocalThreePhaseCommitCohort ready() {
-        logModificationCount();
-        return readySupport.onTransactionReady(getWriteDelegate(), operationError);
-    }
-}
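
The removed context applied modifications eagerly against a local delegate and remembered only the first failure, deferring it until the transaction was readied. A compact sketch of that error-capturing pattern, with generic names:

    import java.util.function.Consumer;

    // Apply operations until the first failure; later operations are skipped and the
    // recorded error is reported when the transaction is readied.
    final class ErrorCapturingWriter<T> {
        private final T delegate;
        private Exception operationError;

        ErrorCapturingWriter(final T delegate) {
            this.delegate = delegate;
        }

        void execute(final Consumer<T> op) {
            if (operationError == null) {
                try {
                    op.accept(delegate);
                } catch (Exception e) {
                    operationError = e;     // remember the first failure only
                }
            }
        }

        Exception error() {
            return operationError;
        }
    }
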
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java
deleted file mode 100644 (file)
index e6be3a0..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * A factory for creating local transactions used by {@link AbstractTransactionContextFactory} to instantiate
- * transactions on shards which are co-located with the shard leader.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionFactory extends LocalTransactionReadySupport {
-    DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier);
-
-    DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier);
-
-    DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier);
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java
deleted file mode 100644 (file)
index 8c84449..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedTransactions;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-
-/**
- * {@link LocalTransactionFactory} for instantiating backing transactions which are
- * disconnected from each other, ie not chained. These are used by {@link AbstractTransactionContextFactory}
- * to instantiate transactions on shards which are co-located with the shard leader.
- */
-final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<TransactionIdentifier>
-        implements LocalTransactionFactory {
-
-    private final ActorSelection leader;
-    private final ReadOnlyDataTree dataTree;
-    private final ActorUtils actorUtils;
-
-    LocalTransactionFactoryImpl(final ActorUtils actorUtils, final ActorSelection leader,
-            final ReadOnlyDataTree dataTree) {
-        this.leader = requireNonNull(leader);
-        this.dataTree = requireNonNull(dataTree);
-        this.actorUtils = actorUtils;
-    }
-
-    ReadOnlyDataTree getDataTree() {
-        return dataTree;
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newReadTransaction(identifier, false, dataTree.takeSnapshot());
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newReadWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
-    }
-
-    @Override
-    protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> tx) {
-        // No-op
-    }
-
-    @Override
-    protected DOMStoreThreePhaseCommitCohort transactionReady(
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> tx,
-            final DataTreeModification tree,
-            final Exception readyError) {
-        return new LocalThreePhaseCommitCohort(actorUtils, leader, tx, tree, readyError);
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
-    @Override
-    public LocalThreePhaseCommitCohort onTransactionReady(DOMStoreWriteTransaction tx, Exception operationError) {
-        checkArgument(tx instanceof SnapshotBackedWriteTransaction);
-        if (operationError != null) {
-            return new LocalThreePhaseCommitCohort(actorUtils, leader,
-                    (SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
-        }
-
-        return (LocalThreePhaseCommitCohort) tx.ready();
-    }
-}
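
In contrast to the chained variant, the factory removed above hands out transactions that are disconnected from each other: every one snapshots the current tree independently. A minimal illustration, again with a Map standing in for the data tree:

    import java.util.HashMap;
    import java.util.Map;

    // Disconnected transactions: each one snapshots the live state, none of them
    // sees uncommitted work from the others.
    final class LocalFactorySketch {
        private final Map<String, String> tree = new HashMap<>();

        Map<String, String> newTransaction() {
            return new HashMap<>(tree);
        }
    }
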
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java
deleted file mode 100644 (file)
index 103af19..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Interface for a class that can "ready" a transaction.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionReadySupport {
-    LocalThreePhaseCommitCohort onTransactionReady(@NonNull DOMStoreWriteTransaction tx,
-            @Nullable Exception operationError);
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index 1f5f5bc..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.List;
-import scala.concurrent.Future;
-
-/**
- * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
- * instance given out for empty transactions.
- */
-final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
-    static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
-    private NoOpDOMStoreThreePhaseCommitCohort() {
-        // Hidden to prevent instantiation
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return IMMEDIATE_BOOLEAN_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return Collections.emptyList();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java
deleted file mode 100644 (file)
index bfb0046..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-final class NoOpTransactionContext extends TransactionContext {
-    private static final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class);
-
-    private final Throwable failure;
-
-    NoOpTransactionContext(final Throwable failure, final TransactionIdentifier identifier) {
-        super(identifier);
-        this.failure = failure;
-    }
-
-    @Override
-    void closeTransaction() {
-        LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
-    }
-
-    @Override
-    Future<Object> directCommit(final Boolean havePermit) {
-        LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure);
-        return akka.dispatch.Futures.failed(failure);
-    }
-
-    @Override
-    Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNamess) {
-        LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure);
-        return akka.dispatch.Futures.failed(failure);
-    }
-
-    @Override
-    <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture, final Boolean havePermit) {
-        LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
-                readCmd.getPath());
-
-        final Throwable t;
-        if (failure instanceof NoShardLeaderException) {
-            t = new DataStoreUnavailableException(failure.getMessage(), failure);
-        } else {
-            t = failure;
-        }
-        proxyFuture.setException(new ReadFailedException("Error executeRead " + readCmd.getClass().getSimpleName()
-                + " for path " + readCmd.getPath(), t));
-    }
-
-    @Override
-    void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
-        LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path);
-    }
-
-    @Override
-    void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path);
-    }
-
-    @Override
-    void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path);
-    }
-}
index d53a8bb4683bf81672fb4beb548cf60bb3a54829..43e9c3e6fd75326594c8ebfa3c4d6868e464e909 100644 (file)
@@ -13,10 +13,9 @@ import com.google.common.annotations.Beta;
 import java.util.Map;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
@@ -24,7 +23,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.osgi.service.component.annotations.Activate;
 import org.osgi.service.component.annotations.Component;
@@ -39,7 +38,7 @@ import org.slf4j.LoggerFactory;
 @Beta
 @Component(factory = OSGiDOMStore.FACTORY_NAME, service = { DOMStore.class,  DistributedDataStoreInterface.class })
 public final class OSGiDOMStore
-        implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, DOMDataTreeCommitCohortRegistry {
+        implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, CommitCohortExtension {
     // OSGi DS Component Factory name
     static final String FACTORY_NAME = "org.opendaylight.controller.cluster.datastore.OSGiDOMStore";
     static final String DATASTORE_INST_PROP = ".datastore.instance";
@@ -47,30 +46,41 @@ public final class OSGiDOMStore
 
     private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMStore.class);
 
-    private LogicalDatastoreType datastoreType;
+    private final LogicalDatastoreType datastoreType;
     private AbstractDataStore datastore;
 
+    @Activate
+    public OSGiDOMStore(final Map<String, ?> properties) {
+        datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP));
+        datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP));
+        LOG.info("Datastore service type {} activated", datastoreType);
+    }
+
+    @Deactivate
+    void deactivate() {
+        datastore = null;
+        LOG.info("Datastore service type {} deactivated", datastoreType);
+    }
+
     @Override
     public ActorUtils getActorUtils() {
         return datastore.getActorUtils();
     }
 
     @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
-            final YangInstanceIdentifier shardLookup, final YangInstanceIdentifier insideShard,
-            final DOMDataTreeChangeListener delegate) {
+    public Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+            final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
         return datastore.registerProxyListener(shardLookup, insideShard, delegate);
     }
 
     @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener) {
+    public Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
         return datastore.registerTreeChangeListener(treeId, listener);
     }
 
     @Override
-    public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
-            final DOMDataTreeIdentifier path, final T cohort) {
+    public Registration registerCommitCohort(final DOMDataTreeIdentifier path, final DOMDataTreeCommitCohort cohort) {
         return datastore.registerCommitCohort(path, cohort);
     }
 
@@ -94,16 +104,9 @@ public final class OSGiDOMStore
         return datastore.newReadWriteTransaction();
     }
 
-    @Activate
-    void activate(final Map<String, ?> properties) {
-        datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP));
-        datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP));
-        LOG.info("Datastore service type {} activated", datastoreType);
-    }
-
-    @Deactivate
-    void deactivate() {
-        datastore = null;
-        LOG.info("Datastore service type {} deactivated", datastoreType);
+    @Override
+    public Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return datastore.registerLegacyTreeChangeListener(treeId, listener);
     }
 }
index 1480643e0b084a0e2ffc733c8210bf5aedd7928d..3e2db7dfed466b7681ef2a1a32f16027436b596b 100644 (file)
@@ -12,17 +12,15 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import java.util.Dictionary;
-import java.util.Hashtable;
 import java.util.Map;
 import org.checkerframework.checker.lock.qual.GuardedBy;
-import org.gaul.modernizer_maven_annotations.SuppressModernizer;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfigProvider;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
+import org.osgi.framework.FrameworkUtil;
 import org.osgi.service.component.ComponentFactory;
 import org.osgi.service.component.ComponentInstance;
 import org.osgi.service.component.annotations.Activate;
@@ -53,7 +51,7 @@ public final class OSGiDistributedDataStore {
         private final String serviceType;
 
         @GuardedBy("this")
-        private ComponentInstance component;
+        private ComponentInstance<OSGiDOMStore> component;
         @GuardedBy("this")
         private boolean stopped;
 
@@ -87,17 +85,15 @@ public final class OSGiDistributedDataStore {
         }
 
         @Override
-        @SuppressModernizer
         public void onSuccess(final Object result) {
             LOG.debug("Distributed Datastore type {} reached initial settle", datastoreType);
 
             synchronized (this) {
                 if (!stopped) {
-                    final Dictionary<String, Object> dict = new Hashtable<>();
-                    dict.put(OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType);
-                    dict.put(OSGiDOMStore.DATASTORE_INST_PROP, datastore);
-                    dict.put("type", serviceType);
-                    component = datastoreFactory.newInstance(dict);
+                    component = datastoreFactory.newInstance(FrameworkUtil.asDictionary(Map.of(
+                        OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType,
+                        OSGiDOMStore.DATASTORE_INST_PROP, datastore,
+                        "type", serviceType)));
                     LOG.info("Distributed Datastore type {} started", datastoreType);
                 }
             }
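
The hunk above drops the hand-built Hashtable in favour of FrameworkUtil.asDictionary(Map.of(...)), which exposes an immutable Map through the legacy Dictionary API that ComponentFactory.newInstance() expects. A minimal sketch of that conversion, assuming an OSGi core artifact on the classpath; the property keys and values below are illustrative placeholders, not the real OSGiDOMStore constants:

import java.util.Dictionary;
import java.util.Map;
import org.osgi.framework.FrameworkUtil;

public final class DictionaryExample {
    public static void main(final String[] args) {
        // Wrap an immutable Map as a Dictionary without copying it into a Hashtable.
        final Dictionary<String, String> props = FrameworkUtil.asDictionary(Map.of(
            "datastore.type", "operational",          // placeholder key
            "type", "distributed-operational"));      // placeholder service-type marker
        System.out.println(props.get("type"));
    }
}
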
@@ -111,26 +107,23 @@ public final class OSGiDistributedDataStore {
 
     private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedDataStore.class);
 
-    @Reference
-    DOMSchemaService schemaService = null;
-    @Reference
-    ActorSystemProvider actorSystemProvider = null;
-    @Reference
-    DatastoreContextIntrospectorFactory introspectorFactory = null;
-    @Reference
-    DatastoreSnapshotRestore snapshotRestore = null;
-    @Reference
-    ModuleShardConfigProvider configProvider = null;
-    @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")")
-    ComponentFactory datastoreFactory = null;
-
+    private final ComponentFactory<OSGiDOMStore> datastoreFactory;
     private DatastoreState configDatastore;
     private DatastoreState operDatastore;
 
     @Activate
-    void activate(final Map<String, Object> properties) {
-        configDatastore = createDatastore(LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null);
-        operDatastore = createDatastore(LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties,
+    public OSGiDistributedDataStore(@Reference final DOMSchemaService schemaService,
+            @Reference final ActorSystemProvider actorSystemProvider,
+            @Reference final DatastoreContextIntrospectorFactory introspectorFactory,
+            @Reference final DatastoreSnapshotRestore snapshotRestore,
+            @Reference final ModuleShardConfigProvider configProvider,
+            @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")")
+            final ComponentFactory<OSGiDOMStore> datastoreFactory, final Map<String, Object> properties) {
+        this.datastoreFactory = requireNonNull(datastoreFactory);
+        configDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+            LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null);
+        operDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+            LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties,
             new ConfigurationImpl(configProvider));
     }
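
The same file also moves from field-level @Reference injection to an @Activate constructor, so the component's collaborators arrive as final, non-null constructor arguments (a Declarative Services 1.4 style). A hedged sketch of that shape; ExampleComponent, FooService and the property map usage are made up for illustration and need an SCR runtime to actually activate:

import static java.util.Objects.requireNonNull;

import java.util.Map;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Deactivate;
import org.osgi.service.component.annotations.Reference;

// Placeholder service interface; stands in for DOMSchemaService and friends.
interface FooService {
    String name();
}

// Hypothetical component using constructor injection, mirroring the shape of the change above.
@Component(immediate = true)
public final class ExampleComponent {
    private final FooService foo;

    @Activate
    public ExampleComponent(@Reference final FooService foo, final Map<String, Object> properties) {
        // The reference is injected before any other method can run, so the field can be final.
        this.foo = requireNonNull(foo);
    }

    @Deactivate
    void deactivate() {
        // Nothing beyond the injected reference is held in this sketch.
    }
}
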
 
@@ -149,14 +142,16 @@ public final class OSGiDistributedDataStore {
         configDatastore = null;
     }
 
-    private DatastoreState createDatastore(final LogicalDatastoreType datastoreType, final String serviceType,
-            final Map<String, Object> properties, final Configuration config) {
+    private DatastoreState createDatastore(final DOMSchemaService schemaService,
+            final ActorSystemProvider actorSystemProvider, final DatastoreSnapshotRestore snapshotRestore,
+            final DatastoreContextIntrospectorFactory introspectorFactory, final LogicalDatastoreType datastoreType,
+            final String serviceType, final Map<String, Object> properties, final Configuration config) {
         LOG.info("Distributed Datastore type {} starting", datastoreType);
-        final DatastoreContextIntrospector introspector = introspectorFactory.newInstance(datastoreType, properties);
-        final AbstractDataStore datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider,
+        final var introspector = introspectorFactory.newInstance(datastoreType, properties);
+        final var datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider,
             introspector.getContext(), introspector, snapshotRestore, config);
-        datastore.setCloseable(schemaService.registerSchemaContextListener(datastore));
-        final DatastoreState state = new DatastoreState(introspector, datastoreType, datastore, serviceType);
+        datastore.setCloseable(schemaService.registerSchemaContextListener(datastore::onModelContextUpdated));
+        final var state = new DatastoreState(introspector, datastoreType, datastore, serviceType);
 
         Futures.addCallback(datastore.initialSettleFuture(), state,
             // Note we are invoked from shard manager and therefore could block it, hence the round-trip to executor
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java
deleted file mode 100644 (file)
index 3f0c98c..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class for limiting operations.
- */
-public class OperationLimiter  {
-    private static final Logger LOG = LoggerFactory.getLogger(OperationLimiter.class);
-    private final TransactionIdentifier identifier;
-    private final long acquireTimeout;
-    private final Semaphore semaphore;
-    private final int maxPermits;
-
-    OperationLimiter(final TransactionIdentifier identifier, final int maxPermits, final long acquireTimeoutSeconds) {
-        this.identifier = requireNonNull(identifier);
-
-        checkArgument(acquireTimeoutSeconds >= 0);
-        this.acquireTimeout = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds);
-
-        checkArgument(maxPermits >= 0);
-        this.maxPermits = maxPermits;
-        this.semaphore = new Semaphore(maxPermits);
-    }
-
-    boolean acquire() {
-        return acquire(1);
-    }
-
-    boolean acquire(final int acquirePermits) {
-        try {
-            if (semaphore.tryAcquire(acquirePermits, acquireTimeout, TimeUnit.NANOSECONDS)) {
-                return true;
-            }
-        } catch (InterruptedException e) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Interrupted when trying to acquire operation permit for transaction {}", identifier, e);
-            } else {
-                LOG.warn("Interrupted when trying to acquire operation permit for transaction {}", identifier);
-            }
-        }
-
-        return false;
-    }
-
-    void release() {
-        release(1);
-    }
-
-    void release(final int permits) {
-        this.semaphore.release(permits);
-    }
-
-    @VisibleForTesting
-    TransactionIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    @VisibleForTesting
-    int availablePermits() {
-        return semaphore.availablePermits();
-    }
-
-    /**
-     * Release all the permits.
-     */
-    public void releaseAll() {
-        this.semaphore.release(maxPermits - availablePermits());
-    }
-}
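
For context on what the removed OperationLimiter did: it bounded the number of outstanding operations with a Semaphore whose tryAcquire() carried a timeout. A stand-alone sketch of that pattern under simplified assumptions (no transaction identifiers, no logging); SimpleLimiter is an illustrative name, not the removed class:

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public final class SimpleLimiter {
    private final Semaphore semaphore;
    private final long acquireTimeoutNanos;

    SimpleLimiter(final int maxPermits, final long acquireTimeoutSeconds) {
        semaphore = new Semaphore(maxPermits);
        acquireTimeoutNanos = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds);
    }

    // Returns false when no permit becomes available in time or the thread is interrupted.
    boolean acquire() {
        try {
            return semaphore.tryAcquire(acquireTimeoutNanos, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
    }

    void release() {
        semaphore.release();
    }

    public static void main(final String[] args) {
        final SimpleLimiter limiter = new SimpleLimiter(2, 1);
        System.out.println(limiter.acquire());  // true, first permit
        System.out.println(limiter.acquire());  // true, second permit
        System.out.println(limiter.acquire());  // false after the one-second wait
    }
}
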
index 4df1352b1904467f15aa9d4df846884f7cb1745a..28042ecc3dc28746bb7bb0d4babfff3ce297fa2e 100644 (file)
@@ -8,11 +8,11 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeSnapshot> {
     ReadOnlyShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
-        final DataTreeSnapshot snapshot) {
+            final DataTreeSnapshot snapshot) {
         super(parent, id, snapshot);
     }
 }
index f28d0d08b3e0a9e610bda2973da3a3dbc8b6682c..b55d24ac8b20f90bdee32f62811ff46ee1a5227a 100644 (file)
@@ -7,21 +7,21 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeModification> {
-
     ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
-        final DataTreeModification modification) {
+            final DataTreeModification modification) {
         super(parent, id, modification);
     }
 
-    ShardDataTreeCohort ready(Optional<SortedSet<String>> participatingShardNames) {
-        Preconditions.checkState(close(), "Transaction is already closed");
+    ShardDataTreeCohort ready(final Optional<SortedSet<String>> participatingShardNames) {
+        checkState(close(), "Transaction is already closed");
         return getParent().finishTransaction(this, participatingShardNames);
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java
deleted file mode 100644 (file)
index ade9c37..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Redirects front-end transaction operations to a shard for processing. Instances of this class are used
- * when the destination shard is remote to the caller.
- *
- * @author Thomas Pantelis
- */
-final class RemoteTransactionContext extends TransactionContext {
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContext.class);
-
-    private final ActorUtils actorUtils;
-    private final ActorSelection actor;
-    private final OperationLimiter limiter;
-
-    private BatchedModifications batchedModifications;
-    private int totalBatchedModificationsSent;
-    private int batchPermits;
-
-    /**
-     * We have observed a failed modification batch. This transaction context is effectively doomed, as the backend
-     * does not have a correct view of the world. If this happens, we do not limit operations but rather short-cut them
-     * to a either a no-op (modifications) or a failure (reads). Once the transaction is ready, though, we send the
-     * message to resynchronize with the backend, sharing a 'lost message' failure path.
-     */
-    private volatile Throwable failedModification;
-
-    RemoteTransactionContext(final TransactionIdentifier identifier, final ActorSelection actor,
-            final ActorUtils actorUtils, final short remoteTransactionVersion, final OperationLimiter limiter) {
-        super(identifier, remoteTransactionVersion);
-        this.limiter = requireNonNull(limiter);
-        this.actor = actor;
-        this.actorUtils = actorUtils;
-    }
-
-    private ActorSelection getActor() {
-        return actor;
-    }
-
-    protected ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    @Override
-    void closeTransaction() {
-        LOG.debug("Tx {} closeTransaction called", getIdentifier());
-        TransactionContextCleanup.untrack(this);
-
-        actorUtils.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable());
-    }
-
-    @Override
-    Future<Object> directCommit(final Boolean havePermit) {
-        LOG.debug("Tx {} directCommit called", getIdentifier());
-
-        // Send the remaining batched modifications, if any, with the ready flag set.
-        bumpPermits(havePermit);
-        return sendBatchedModifications(true, true, Optional.empty());
-    }
-
-    @Override
-    Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        logModificationCount();
-
-        LOG.debug("Tx {} readyTransaction called", getIdentifier());
-
-        // Send the remaining batched modifications, if any, with the ready flag set.
-
-        bumpPermits(havePermit);
-        Future<Object> lastModificationsFuture = sendBatchedModifications(true, false, participatingShardNames);
-
-        // Transform the last reply Future into a Future that returns the cohort actor path from
-        // the last reply message. That's the end result of the ready operation.
-        return TransactionReadyReplyMapper.transform(lastModificationsFuture, actorUtils, getIdentifier());
-    }
-
-    private void bumpPermits(final Boolean havePermit) {
-        if (Boolean.TRUE.equals(havePermit)) {
-            ++batchPermits;
-        }
-    }
-
-    private BatchedModifications newBatchedModifications() {
-        return new BatchedModifications(getIdentifier(), getTransactionVersion());
-    }
-
-    private void batchModification(final Modification modification, final boolean havePermit) {
-        incrementModificationCount();
-        if (havePermit) {
-            ++batchPermits;
-        }
-
-        if (batchedModifications == null) {
-            batchedModifications = newBatchedModifications();
-        }
-
-        batchedModifications.addModification(modification);
-
-        if (batchedModifications.getModifications().size()
-                >= actorUtils.getDatastoreContext().getShardBatchedModificationCount()) {
-            sendBatchedModifications();
-        }
-    }
-
-    @VisibleForTesting
-    Future<Object> sendBatchedModifications() {
-        return sendBatchedModifications(false, false, Optional.empty());
-    }
-
-    private Future<Object> sendBatchedModifications(final boolean ready, final boolean doCommitOnReady,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        Future<Object> sent = null;
-        if (ready || batchedModifications != null && !batchedModifications.getModifications().isEmpty()) {
-            if (batchedModifications == null) {
-                batchedModifications = newBatchedModifications();
-            }
-
-            LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
-                    batchedModifications.getModifications().size(), ready);
-
-            batchedModifications.setDoCommitOnReady(doCommitOnReady);
-            batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
-
-            final BatchedModifications toSend = batchedModifications;
-            final int permitsToRelease = batchPermits;
-            batchPermits = 0;
-
-            if (ready) {
-                batchedModifications.setReady(participatingShardNames);
-                batchedModifications.setDoCommitOnReady(doCommitOnReady);
-                batchedModifications = null;
-            } else {
-                batchedModifications = newBatchedModifications();
-
-                final Throwable failure = failedModification;
-                if (failure != null) {
-                    // We have observed a modification failure, it does not make sense to send this batch. This speeds
-                    // up the time when the application could be blocked due to messages timing out and operation
-                    // limiter kicking in.
-                    LOG.debug("Tx {} modifications previously failed, not sending a non-ready batch", getIdentifier());
-                    limiter.release(permitsToRelease);
-                    return Futures.failed(failure);
-                }
-            }
-
-            sent = actorUtils.executeOperationAsync(getActor(), toSend.toSerializable(),
-                actorUtils.getTransactionCommitOperationTimeout());
-            sent.onComplete(new OnComplete<>() {
-                @Override
-                public void onComplete(final Throwable failure, final Object success) {
-                    if (failure != null) {
-                        LOG.debug("Tx {} modifications failed", getIdentifier(), failure);
-                        failedModification = failure;
-                    } else {
-                        LOG.debug("Tx {} modifications completed with {}", getIdentifier(), success);
-                    }
-                    limiter.release(permitsToRelease);
-                }
-            }, actorUtils.getClientDispatcher());
-        }
-
-        return sent;
-    }
-
-    @Override
-    void executeDelete(final YangInstanceIdentifier path, final Boolean havePermit) {
-        LOG.debug("Tx {} executeDelete called path = {}", getIdentifier(), path);
-        executeModification(new DeleteModification(path), havePermit);
-    }
-
-    @Override
-    void executeMerge(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        LOG.debug("Tx {} executeMerge called path = {}", getIdentifier(), path);
-        executeModification(new MergeModification(path, data), havePermit);
-    }
-
-    @Override
-    void executeWrite(final YangInstanceIdentifier path, final NormalizedNode data, final Boolean havePermit) {
-        LOG.debug("Tx {} executeWrite called path = {}", getIdentifier(), path);
-        executeModification(new WriteModification(path, data), havePermit);
-    }
-
-    private void executeModification(final AbstractModification modification, final Boolean havePermit) {
-        final boolean permitToRelease;
-        if (havePermit == null) {
-            permitToRelease = failedModification == null && acquireOperation();
-        } else {
-            permitToRelease = havePermit;
-        }
-
-        batchModification(modification, permitToRelease);
-    }
-
-    @Override
-    <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> returnFuture,
-            final Boolean havePermit) {
-        LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
-                readCmd.getPath());
-
-        final Throwable failure = failedModification;
-        if (failure != null) {
-            // If we know there was a previous modification failure, we must not send a read request, as it risks
-            // returning incorrect data. We check this before acquiring an operation simply because we want the app
-            // to complete this transaction as soon as possible.
-            returnFuture.setException(new ReadFailedException("Previous modification failed, cannot "
-                    + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
-            return;
-        }
-
-        // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
-        // public API contract.
-
-        final boolean permitToRelease = havePermit == null ? acquireOperation() : havePermit;
-        sendBatchedModifications();
-
-        OnComplete<Object> onComplete = new OnComplete<>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                // We have previously acquired an operation, now release it, no matter what happened
-                if (permitToRelease) {
-                    limiter.release();
-                }
-
-                if (failure != null) {
-                    LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(),
-                        failure);
-
-                    returnFuture.setException(new ReadFailedException("Error checking "
-                        + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
-                } else {
-                    LOG.debug("Tx {} {} operation succeeded", getIdentifier(), readCmd.getClass().getSimpleName());
-                    readCmd.processResponse(response, returnFuture);
-                }
-            }
-        };
-
-        final Future<Object> future = actorUtils.executeOperationAsync(getActor(),
-            readCmd.asVersion(getTransactionVersion()).toSerializable(), actorUtils.getOperationTimeout());
-        future.onComplete(onComplete, actorUtils.getClientDispatcher());
-    }
-
-    /**
-     * Acquire operation from the limiter if the hand-off has completed. If the hand-off is still ongoing, this method
-     * does nothing.
-     *
-     * @return True if a permit was successfully acquired, false otherwise
-     */
-    private boolean acquireOperation() {
-        checkState(isOperationHandOffComplete(),
-            "Attempted to acquire execute operation permit for transaction %s on actor %s during handoff",
-            getIdentifier(), actor);
-
-        if (limiter.acquire()) {
-            return true;
-        }
-
-        LOG.warn("Failed to acquire execute operation permit for transaction {} on actor {}", getIdentifier(), actor);
-        return false;
-    }
-
-    @Override
-    boolean usesOperationLimiting() {
-        return true;
-    }
-}
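
The deleted RemoteTransactionContext buffered modifications into BatchedModifications, flushed a batch once it reached the configured size, and stopped sending non-ready batches after the first observed failure. A simplified, stand-alone sketch of that batch-and-fail-fast idea; ModificationBatcher and the String "modifications" are placeholders, not the controller's message types:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class ModificationBatcher {
    private final int batchSize;
    private final Consumer<List<String>> sender;
    private List<String> batch = new ArrayList<>();
    private RuntimeException failure;

    ModificationBatcher(final int batchSize, final Consumer<List<String>> sender) {
        this.batchSize = batchSize;
        this.sender = sender;
    }

    void add(final String modification) {
        if (failure != null) {
            // The backend view is already inconsistent; skip the work and surface the error later.
            return;
        }
        batch.add(modification);
        if (batch.size() >= batchSize) {
            flush();
        }
    }

    void flush() {
        if (batch.isEmpty()) {
            return;
        }
        final List<String> toSend = batch;
        batch = new ArrayList<>();
        try {
            sender.accept(toSend);
        } catch (RuntimeException e) {
            failure = e;
        }
    }

    public static void main(final String[] args) {
        final ModificationBatcher batcher = new ModificationBatcher(2, sent -> System.out.println("sent " + sent));
        batcher.add("write /a");
        batcher.add("merge /b");   // reaches the batch size and triggers a flush
        batcher.add("delete /c");
        batcher.flush();           // flushes the remaining single modification
    }
}
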
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java
deleted file mode 100644 (file)
index 333d11b..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
-import akka.pattern.AskTimeoutException;
-import akka.util.Timeout;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Handles creation of TransactionContext instances for remote transactions. This class creates
- * remote transactions, if necessary, by sending CreateTransaction messages with retries, up to a limit,
- * if the shard doesn't have a leader yet. This is done by scheduling a retry task after a short delay.
- * <p/>
- * The end result from a completed CreateTransaction message is a TransactionContext that is
- * used to perform transaction operations. Transaction operations that occur before the
- * CreateTransaction completes are cached via a DelayedTransactionContextWrapper and executed once the
- * CreateTransaction completes, successfully or not.
- */
-final class RemoteTransactionContextSupport {
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContextSupport.class);
-
-    private static final long CREATE_TX_TRY_INTERVAL_IN_MS = 1000;
-    private static final long MAX_CREATE_TX_MSG_TIMEOUT_IN_MS = 5000;
-
-    private final TransactionProxy parent;
-    private final String shardName;
-
-    /**
-     * The target primary shard.
-     */
-    private volatile PrimaryShardInfo primaryShardInfo;
-
-    /**
-     * The total timeout for creating a tx on the primary shard.
-     */
-    private volatile long totalCreateTxTimeout;
-
-    private final Timeout createTxMessageTimeout;
-
-    private final DelayedTransactionContextWrapper transactionContextWrapper;
-
-    RemoteTransactionContextSupport(final DelayedTransactionContextWrapper transactionContextWrapper,
-            final TransactionProxy parent, final String shardName) {
-        this.parent = requireNonNull(parent);
-        this.shardName = shardName;
-        this.transactionContextWrapper = transactionContextWrapper;
-
-        // For the total create tx timeout, use 2 times the election timeout. This should be enough time for
-        // a leader re-election to occur if we happen to hit it in transition.
-        totalCreateTxTimeout = parent.getActorUtils().getDatastoreContext().getShardRaftConfig()
-                .getElectionTimeOutInterval().toMillis() * 2;
-
-        // We'll use the operationTimeout for the create Tx message timeout so it can be set appropriately
-        // for unit tests but cap it at MAX_CREATE_TX_MSG_TIMEOUT_IN_MS. The operationTimeout could be set
-        // larger than the totalCreateTxTimeout in production which we don't want.
-        long operationTimeout = parent.getActorUtils().getOperationTimeout().duration().toMillis();
-        createTxMessageTimeout = new Timeout(Math.min(operationTimeout, MAX_CREATE_TX_MSG_TIMEOUT_IN_MS),
-                TimeUnit.MILLISECONDS);
-    }
-
-    String getShardName() {
-        return shardName;
-    }
-
-    private TransactionType getTransactionType() {
-        return parent.getType();
-    }
-
-    private ActorUtils getActorUtils() {
-        return parent.getActorUtils();
-    }
-
-    private TransactionIdentifier getIdentifier() {
-        return parent.getIdentifier();
-    }
-
-    /**
-     * Sets the target primary shard and initiates a CreateTransaction try.
-     */
-    void setPrimaryShard(final PrimaryShardInfo newPrimaryShardInfo) {
-        this.primaryShardInfo = newPrimaryShardInfo;
-
-        if (getTransactionType() == TransactionType.WRITE_ONLY
-                && getActorUtils().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
-            ActorSelection primaryShard = newPrimaryShardInfo.getPrimaryShardActor();
-
-            LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
-                getIdentifier(), primaryShard);
-
-            // For write-only Tx's we prepare the transaction modifications directly on the shard actor
-            // to avoid the overhead of creating a separate transaction actor.
-            transactionContextWrapper.executePriorTransactionOperations(createValidTransactionContext(
-                    primaryShard, String.valueOf(primaryShard.path()), newPrimaryShardInfo.getPrimaryShardVersion()));
-        } else {
-            tryCreateTransaction();
-        }
-    }
-
-    /**
-     * Asynchronously performs a CreateTransaction attempt.
-     */
-    private void tryCreateTransaction() {
-        LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(),
-                primaryShardInfo.getPrimaryShardActor());
-
-        Object serializedCreateMessage = new CreateTransaction(getIdentifier(), getTransactionType().ordinal(),
-                    primaryShardInfo.getPrimaryShardVersion()).toSerializable();
-
-        Future<Object> createTxFuture = getActorUtils().executeOperationAsync(
-                primaryShardInfo.getPrimaryShardActor(), serializedCreateMessage, createTxMessageTimeout);
-
-        createTxFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                onCreateTransactionComplete(failure, response);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    private void tryFindPrimaryShard() {
-        LOG.debug("Tx {} Retrying findPrimaryShardAsync for shard {}", getIdentifier(), shardName);
-
-        this.primaryShardInfo = null;
-        Future<PrimaryShardInfo> findPrimaryFuture = getActorUtils().findPrimaryShardAsync(shardName);
-        findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
-            @Override
-            public void onComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
-                onFindPrimaryShardComplete(failure, newPrimaryShardInfo);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
-    private void onFindPrimaryShardComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
-        if (failure == null) {
-            this.primaryShardInfo = newPrimaryShardInfo;
-            tryCreateTransaction();
-        } else {
-            LOG.debug("Tx {}: Find primary for shard {} failed", getIdentifier(), shardName, failure);
-
-            onCreateTransactionComplete(failure, null);
-        }
-    }
-
-    private void onCreateTransactionComplete(final Throwable failure, final Object response) {
-        // An AskTimeoutException will occur if the local shard forwards to an unavailable remote leader or
-        // the cached remote leader actor is no longer available.
-        boolean retryCreateTransaction = primaryShardInfo != null
-                && (failure instanceof NoShardLeaderException || failure instanceof AskTimeoutException);
-
-        // Schedule a retry unless we're out of retries. Note: totalCreateTxTimeout is volatile as it may
-        // be written by different threads however not concurrently, therefore decrementing it
-        // non-atomically here is ok.
-        if (retryCreateTransaction && totalCreateTxTimeout > 0) {
-            long scheduleInterval = CREATE_TX_TRY_INTERVAL_IN_MS;
-            if (failure instanceof AskTimeoutException) {
-                // Since we use the createTxMessageTimeout for the CreateTransaction request and it timed
-                // out, subtract it from the total timeout. Also since the createTxMessageTimeout period
-                // has already elapsed, we can immediately schedule the retry (10 ms is virtually immediate).
-                totalCreateTxTimeout -= createTxMessageTimeout.duration().toMillis();
-                scheduleInterval = 10;
-            }
-
-            totalCreateTxTimeout -= scheduleInterval;
-
-            LOG.debug("Tx {}: create tx on shard {} failed with exception \"{}\" - scheduling retry in {} ms",
-                    getIdentifier(), shardName, failure, scheduleInterval);
-
-            getActorUtils().getActorSystem().scheduler().scheduleOnce(
-                    FiniteDuration.create(scheduleInterval, TimeUnit.MILLISECONDS),
-                    this::tryFindPrimaryShard, getActorUtils().getClientDispatcher());
-            return;
-        }
-
-        createTransactionContext(failure, response);
-    }
-
-    private void createTransactionContext(final Throwable failure, final Object response) {
-        // Create the TransactionContext from the response or failure. Store the new
-        // TransactionContext locally until we've completed invoking the
-        // TransactionOperations. This avoids thread timing issues which could cause
-        // out-of-order TransactionOperations. Eg, on a modification operation, if the
-        // TransactionContext is non-null, then we directly call the TransactionContext.
-        // However, at the same time, the code may be executing the cached
-        // TransactionOperations. So to avoid this timing issue, we don't publish the
-        // TransactionContext until after we've executed all cached TransactionOperations.
-        TransactionContext localTransactionContext;
-        if (failure != null) {
-            LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
-
-            Throwable resultingEx = failure;
-            if (failure instanceof AskTimeoutException) {
-                resultingEx = new ShardLeaderNotRespondingException(String.format(
-                        "Could not create a %s transaction on shard %s. The shard leader isn't responding.",
-                        parent.getType(), shardName), failure);
-            } else if (!(failure instanceof NoShardLeaderException)) {
-                resultingEx = new Exception(String.format(
-                    "Error creating %s transaction on shard %s", parent.getType(), shardName), failure);
-            }
-
-            localTransactionContext = new NoOpTransactionContext(resultingEx, getIdentifier());
-        } else if (CreateTransactionReply.isSerializedType(response)) {
-            localTransactionContext = createValidTransactionContext(
-                    CreateTransactionReply.fromSerializable(response));
-        } else {
-            IllegalArgumentException exception = new IllegalArgumentException(String.format(
-                    "Invalid reply type %s for CreateTransaction", response.getClass()));
-
-            localTransactionContext = new NoOpTransactionContext(exception, getIdentifier());
-        }
-        transactionContextWrapper.executePriorTransactionOperations(localTransactionContext);
-    }
-
-    private TransactionContext createValidTransactionContext(final CreateTransactionReply reply) {
-        LOG.debug("Tx {} Received {}", getIdentifier(), reply);
-
-        return createValidTransactionContext(getActorUtils().actorSelection(reply.getTransactionPath()),
-                reply.getTransactionPath(), primaryShardInfo.getPrimaryShardVersion());
-    }
-
-    private TransactionContext createValidTransactionContext(final ActorSelection transactionActor,
-            final String transactionPath, final short remoteTransactionVersion) {
-        final TransactionContext ret = new RemoteTransactionContext(transactionContextWrapper.getIdentifier(),
-                transactionActor, getActorUtils(), remoteTransactionVersion, transactionContextWrapper.getLimiter());
-
-        if (parent.getType() == TransactionType.READ_ONLY) {
-            TransactionContextCleanup.track(parent, ret);
-        }
-
-        return ret;
-    }
-}
-
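
The deleted RemoteTransactionContextSupport retried CreateTransaction on the actor-system scheduler, subtracting each retry interval from a total timeout budget until the shard produced a leader or the budget ran out. A rough sketch of that budgeted-retry loop using a plain ScheduledExecutorService in place of the Akka scheduler; BudgetedRetry and its parameters are illustrative only:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class BudgetedRetry {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final BooleanSupplier attempt;
    private final long retryMillis;
    private long remainingBudgetMillis;

    BudgetedRetry(final BooleanSupplier attempt, final long retryMillis, final long totalBudgetMillis) {
        this.attempt = attempt;
        this.retryMillis = retryMillis;
        remainingBudgetMillis = totalBudgetMillis;
    }

    void start() {
        // Stop on success or once the budget is exhausted, otherwise schedule another try.
        if (attempt.getAsBoolean() || remainingBudgetMillis <= 0) {
            scheduler.shutdown();
            return;
        }
        remainingBudgetMillis -= retryMillis;
        scheduler.schedule(this::start, retryMillis, TimeUnit.MILLISECONDS);
    }

    public static void main(final String[] args) {
        final long leaderReadyAt = System.currentTimeMillis() + 2500;
        new BudgetedRetry(() -> {
            final boolean done = System.currentTimeMillis() >= leaderReadyAt;
            System.out.println(done ? "transaction created" : "no shard leader yet, retrying");
            return done;
        }, 1000, 5000).start();
    }
}
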
index 832228961828eda1b1b5ea268a4d4c22264aca7c..857c2844ffcc75a52b416998755efae1e71e87b4 100644 (file)
@@ -14,10 +14,12 @@ import akka.actor.ActorRef;
 import akka.actor.Props;
 import com.google.common.collect.Iterables;
 import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Deque;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
@@ -27,10 +29,10 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
 import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor {
@@ -41,7 +43,7 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor
     private Deque<DataTreeChanged> otherMessages = new ArrayDeque<>();
 
     private RootDataTreeChangeListenerActor(final DOMDataTreeChangeListener listener, final int shardCount) {
-        super(listener, YangInstanceIdentifier.empty());
+        super(listener, YangInstanceIdentifier.of());
         this.shardCount = shardCount;
     }
 
@@ -83,7 +85,7 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor
         /*
          * We need to make-pretend that the data coming into the listener is coming from a single logical entity, where
          * ordering is partially guaranteed (on shard boundaries). The data layout in shards is such that each DataTree
-         * is rooted at YangInstanceIdentifier.empty(), but their contents vary:
+         * is rooted at YangInstanceIdentifier.of(), but their contents vary:
          *
          * 1) non-default shards contain immediate children of root from one module
          * 2) default shard contains everything else
@@ -95,7 +97,10 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor
          * Construct an overall NormalizedNode view of the entire datastore by combining first-level children from all
          * reported initial state reports, report that node as written and then report any additional deltas.
          */
-        final Deque<DataTreeCandidate> initialChanges = new ArrayDeque<>();
+        final List<DataTreeCandidate> initialChanges = new ArrayList<>();
+        // Reserve first item
+        initialChanges.add(null);
+
         final DataContainerNodeBuilder<NodeIdentifier, ContainerNode> rootBuilder = Builders.containerBuilder()
                 .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME));
         for (Object message : initialMessages.values()) {
@@ -106,12 +111,12 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor
                     final Iterator<DataTreeCandidate> it = changes.iterator();
                     initial = it.next();
                     // Append to changes to report as initial. This should not be happening (often?).
-                    it.forEachRemaining(initialChanges::addLast);
+                    it.forEachRemaining(initialChanges::add);
                 } else {
                     initial = Iterables.get(changes, 0);
                 }
 
-                final NormalizedNode root = initial.getRootNode().getDataAfter().orElseThrow();
+                final NormalizedNode root = initial.getRootNode().getDataAfter();
                 verify(root instanceof ContainerNode, "Unexpected root node %s", root);
                 ((ContainerNode) root).body().forEach(rootBuilder::withChild);
             }
@@ -119,8 +124,8 @@ final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor
         // We will not be intercepting any other messages, allow initial state to be reclaimed as soon as possible
         initialMessages = null;
 
-        // Prepend combined initial changed and report initial changes and clear the map
-        initialChanges.addFirst(DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.empty(),
+        // Replace first element with the combined initial change, report initial changes and clear the map
+        initialChanges.set(0, DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.of(),
             DataTreeCandidateNodes.written(rootBuilder.build())));
         super.dataTreeChanged(new DataTreeChanged(initialChanges));
 
index 6f4a5f1d066746d10aaefeae0edb77d57e2b39ea..43cbb7e8720c14898dcfa36e6fa8a92dca0caef6 100644 (file)
@@ -16,7 +16,6 @@ import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.dispatch.OnComplete;
 import com.google.common.collect.Maps;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -31,13 +30,12 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeCh
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
-        extends AbstractListenerRegistration<L> {
+final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener> extends AbstractObjectRegistration<L> {
     private abstract static class State {
 
     }
@@ -76,7 +74,7 @@ final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
             final Set<String> shardNames) {
         super(listener);
         this.actorUtils = requireNonNull(actorUtils);
-        this.state = new ResolveShards(shardNames.size());
+        state = new ResolveShards(shardNames.size());
 
         for (String shardName : shardNames) {
             actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
@@ -95,19 +93,17 @@ final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
         } else if (state instanceof ResolveShards) {
             // Simple case: just mark the fact we were closed, terminating when resolution finishes
             state = new Terminated();
-        } else if (state instanceof Subscribed) {
-            terminate((Subscribed) state);
+        } else if (state instanceof Subscribed subscribed) {
+            terminate(subscribed);
         } else {
             throw new IllegalStateException("Unhandled close in state " + state);
         }
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private synchronized void onFindLocalShardComplete(final String shardName, final Throwable failure,
             final ActorRef shard) {
-        if (state instanceof ResolveShards) {
-            localShardsResolved((ResolveShards) state, shardName, failure, shard);
+        if (state instanceof ResolveShards resolveShards) {
+            localShardsResolved(resolveShards, shardName, failure, shard);
         } else {
             LOG.debug("{}: lookup for shard {} turned into a noop on state {}", logContext(), shardName, state);
         }
@@ -156,7 +152,7 @@ final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
 
         // Subscribe to all shards
         final RegisterDataTreeChangeListener regMessage = new RegisterDataTreeChangeListener(
-            YangInstanceIdentifier.empty(), dtclActor, true);
+            YangInstanceIdentifier.of(), dtclActor, true);
         for (Entry<String, Object> entry : localShards.entrySet()) {
             // Do not retain references to localShards
             final String shardName = entry.getKey();
@@ -172,11 +168,8 @@ final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener>
         }
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private synchronized void onShardSubscribed(final String shardName, final Throwable failure, final Object result) {
-        if (state instanceof Subscribed) {
-            final Subscribed current = (Subscribed) state;
+        if (state instanceof Subscribed current) {
             if (failure != null) {
                 LOG.error("{}: Shard {} failed to subscribe, terminating listener {}", logContext(),
                     shardName, getInstance(), failure);
index a4d524299bde38d972663d42893934d8660d3364..1fcaa9d64d6b4a9eaee3377aa4fbcaea5ab5ebc4 100644 (file)
@@ -30,6 +30,7 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Range;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -39,6 +40,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.OptionalLong;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.ABIVersion;
@@ -50,8 +52,6 @@ import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelope
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
@@ -64,7 +64,6 @@ import org.opendaylight.controller.cluster.common.actor.CommonConfig;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
 import org.opendaylight.controller.cluster.common.actor.MessageTracker;
-import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
 import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
@@ -107,16 +106,15 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -182,6 +180,7 @@ public class Shard extends RaftActor {
 
     private DatastoreContext datastoreContext;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardCommitCoordinator commitCoordinator;
 
     private long transactionCommitTimeout;
@@ -192,6 +191,7 @@ public class Shard extends RaftActor {
 
     private final MessageTracker appendEntriesReplyTracker;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardTransactionActorFactory transactionActorFactory;
 
     private final ShardSnapshotCohort snapshotCohort;
@@ -200,6 +200,7 @@ public class Shard extends RaftActor {
 
     private ShardSnapshot restoreFromSnapshot;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardTransactionMessageRetrySupport messageRetrySupport;
 
     @VisibleForTesting
@@ -217,6 +218,7 @@ public class Shard extends RaftActor {
 
     private final ActorRef exportActor;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     Shard(final AbstractBuilder<?, ?> builder) {
         super(builder.getId().toString(), builder.getPeerAddresses(),
                 Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);
@@ -228,16 +230,11 @@ public class Shard extends RaftActor {
         frontendMetadata = new FrontendMetadata(name);
         exportOnRecovery = datastoreContext.getExportOnRecovery();
 
-        switch (exportOnRecovery) {
-            case Json:
-                exportActor = getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
-                        datastoreContext.getRecoveryExportBaseDir()));
-                break;
-            case Off:
-            default:
-                exportActor = null;
-                break;
-        }
+        exportActor = switch (exportOnRecovery) {
+            case Json -> getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
+                datastoreContext.getRecoveryExportBaseDir()));
+            case Off -> null;
+        };
 
         setPersistence(datastoreContext.isPersistent());
 
@@ -302,7 +299,7 @@ public class Shard extends RaftActor {
     }
 
     private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
-        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
+        ActorRef shardRoleChangeNotifier = getContext().actorOf(
             RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
         return Optional.of(shardRoleChangeNotifier);
     }
@@ -335,11 +332,10 @@ public class Shard extends RaftActor {
         switch (exportOnRecovery) {
             case Json:
                 if (message instanceof SnapshotOffer) {
-                    exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().get(), name),
-                            ActorRef.noSender());
-                } else if (message instanceof ReplicatedLogEntry) {
-                    exportActor.tell(new JsonExportActor.ExportJournal((ReplicatedLogEntry) message),
+                    exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().orElseThrow(), name),
                             ActorRef.noSender());
+                } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+                    exportActor.tell(new JsonExportActor.ExportJournal(replicatedLogEntry), ActorRef.noSender());
                 } else if (message instanceof RecoveryCompleted) {
                     exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender());
                     exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
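
The Shard rewrites in this commit rely on two newer Java constructs: exhaustive switch expressions over an enum (the exportOnRecovery handling above) and pattern matching for instanceof (the message dispatch below). A small self-contained illustration with a made-up message hierarchy; DispatchExample, Request and Reply are not controller types:

public final class DispatchExample {
    enum ExportMode { JSON, OFF }

    record Request(String body) { }
    record Reply(String body) { }

    // Switch expression: the compiler enforces coverage of every constant and yields a value directly.
    static String exporterFor(final ExportMode mode) {
        return switch (mode) {
            case JSON -> "json-export-actor";
            case OFF -> "none";
        };
    }

    // Pattern matching for instanceof: the type test and the cast collapse into one step.
    static String dispatch(final Object message) {
        if (message instanceof Request request) {
            return "handled request: " + request.body();
        } else if (message instanceof Reply reply) {
            return "ignored reply: " + reply.body();
        }
        return "unhandled: " + message;
    }

    public static void main(final String[] args) {
        System.out.println(exporterFor(ExportMode.JSON));
        System.out.println(dispatch(new Request("create-tx")));
        System.out.println(dispatch(new Reply("ok")));
    }
}
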
@@ -358,54 +354,37 @@ public class Shard extends RaftActor {
     @Override
     // non-final for TestShard
     protected void handleNonRaftCommand(final Object message) {
-        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
-            final Optional<Error> maybeError = context.error();
+        try (var context = appendEntriesReplyTracker.received(message)) {
+            final var maybeError = context.error();
             if (maybeError.isPresent()) {
                 LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
-                    maybeError.get());
+                    maybeError.orElseThrow());
             }
 
             store.resetTransactionBatch();
 
-            if (message instanceof RequestEnvelope) {
-                handleRequestEnvelope((RequestEnvelope)message);
+            if (message instanceof RequestEnvelope request) {
+                handleRequestEnvelope(request);
             } else if (MessageAssembler.isHandledMessage(message)) {
                 handleRequestAssemblerMessage(message);
-            } else if (message instanceof ConnectClientRequest) {
-                handleConnectClient((ConnectClientRequest)message);
-            } else if (CreateTransaction.isSerializedType(message)) {
-                handleCreateTransaction(message);
-            } else if (message instanceof BatchedModifications) {
-                handleBatchedModifications((BatchedModifications)message);
-            } else if (message instanceof ForwardedReadyTransaction) {
-                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
-            } else if (message instanceof ReadyLocalTransaction) {
-                handleReadyLocalTransaction((ReadyLocalTransaction)message);
-            } else if (CanCommitTransaction.isSerializedType(message)) {
-                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
-            } else if (CommitTransaction.isSerializedType(message)) {
-                handleCommitTransaction(CommitTransaction.fromSerializable(message));
-            } else if (AbortTransaction.isSerializedType(message)) {
-                handleAbortTransaction(AbortTransaction.fromSerializable(message));
-            } else if (CloseTransactionChain.isSerializedType(message)) {
-                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+            } else if (message instanceof ConnectClientRequest request) {
+                handleConnectClient(request);
             } else if (message instanceof DataTreeChangedReply) {
                 // Ignore reply
-            } else if (message instanceof RegisterDataTreeChangeListener) {
-                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
-            } else if (message instanceof UpdateSchemaContext) {
-                updateSchemaContext((UpdateSchemaContext) message);
-            } else if (message instanceof PeerAddressResolved) {
-                PeerAddressResolved resolved = (PeerAddressResolved) message;
+            } else if (message instanceof RegisterDataTreeChangeListener request) {
+                treeChangeSupport.onMessage(request, isLeader(), hasLeader());
+            } else if (message instanceof UpdateSchemaContext request) {
+                updateSchemaContext(request);
+            } else if (message instanceof PeerAddressResolved resolved) {
                 setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
             } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                 commitTimeoutCheck();
-            } else if (message instanceof DatastoreContext) {
-                onDatastoreContext((DatastoreContext)message);
+            } else if (message instanceof DatastoreContext request) {
+                onDatastoreContext(request);
             } else if (message instanceof RegisterRoleChangeListener) {
-                roleChangeNotifier.get().forward(message, context());
-            } else if (message instanceof FollowerInitialSyncUpStatus) {
-                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
+                roleChangeNotifier.orElseThrow().forward(message, context());
+            } else if (message instanceof FollowerInitialSyncUpStatus request) {
+                shardMBean.setFollowerInitialSyncStatus(request.isInitialSyncDone());
                 context().parent().tell(message, self());
             } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                 sender().tell(getShardMBean(), self());
@@ -413,11 +392,8 @@ public class Shard extends RaftActor {
                 sender().tell(store.getDataTree(), self());
             } else if (message instanceof ServerRemoved) {
                 context().parent().forward(message, context());
-            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
-                messageRetrySupport.onTimerMessage(message);
-            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
-                store.processCohortRegistryCommand(getSender(),
-                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
+            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand request) {
+                store.processCohortRegistryCommand(getSender(), request);
             } else if (message instanceof MakeLeaderLocal) {
                 onMakeLeaderLocal();
             } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
@@ -425,7 +401,28 @@ public class Shard extends RaftActor {
             } else if (GetKnownClients.INSTANCE.equals(message)) {
                 handleGetKnownClients();
             } else if (!responseMessageSlicer.handleMessage(message)) {
-                super.handleNonRaftCommand(message);
+                // Ask-based protocol messages
+                if (CreateTransaction.isSerializedType(message)) {
+                    handleCreateTransaction(message);
+                } else if (message instanceof BatchedModifications request) {
+                    handleBatchedModifications(request);
+                } else if (message instanceof ForwardedReadyTransaction request) {
+                    handleForwardedReadyTransaction(request);
+                } else if (message instanceof ReadyLocalTransaction request) {
+                    handleReadyLocalTransaction(request);
+                } else if (CanCommitTransaction.isSerializedType(message)) {
+                    handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+                } else if (CommitTransaction.isSerializedType(message)) {
+                    handleCommitTransaction(CommitTransaction.fromSerializable(message));
+                } else if (AbortTransaction.isSerializedType(message)) {
+                    handleAbortTransaction(AbortTransaction.fromSerializable(message));
+                } else if (CloseTransactionChain.isSerializedType(message)) {
+                    closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+                } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
+                    messageRetrySupport.onTimerMessage(message);
+                } else {
+                    super.handleNonRaftCommand(message);
+                }
             }
         }
     }
@@ -471,7 +468,7 @@ public class Shard extends RaftActor {
     }
 
     private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
-        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
+        final FrontendIdentifier frontend = cohort.transactionId().getHistoryId().getClientId().getFrontendId();
         final LeaderFrontendState state = knownFrontends.get(frontend);
         if (state == null) {
             // Not tell-based protocol, do nothing
@@ -635,14 +632,12 @@ public class Shard extends RaftActor {
             throw new NotLeaderException(getSelf());
         }
 
-        final Request<?, ?> request = envelope.getMessage();
-        if (request instanceof TransactionRequest) {
-            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
-            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
+        final var request = envelope.getMessage();
+        if (request instanceof TransactionRequest<?> txReq) {
+            final var clientId = txReq.getTarget().getHistoryId().getClientId();
             return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
-        } else if (request instanceof LocalHistoryRequest) {
-            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
-            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
+        } else if (request instanceof LocalHistoryRequest<?> lhReq) {
+            final var clientId = lhReq.getTarget().getClientId();
             return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
         } else {
             LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
@@ -712,13 +707,14 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCommitTransaction(final CommitTransaction commit) {
-        final TransactionIdentifier txId = commit.getTransactionId();
+        final var txId = commit.getTransactionId();
         if (isLeader()) {
             askProtocolEncountered(txId);
             commitCoordinator.handleCommit(txId, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (leader == null) {
                 messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
             } else {
@@ -728,15 +724,16 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
-        final TransactionIdentifier txId = canCommit.getTransactionId();
+        final var txId = canCommit.getTransactionId();
         LOG.debug("{}: Can committing transaction {}", persistenceId(), txId);
 
         if (isLeader()) {
             askProtocolEncountered(txId);
             commitCoordinator.handleCanCommit(txId, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (leader == null) {
                 messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                         "Could not canCommit transaction " + txId);
@@ -748,6 +745,7 @@ public class Shard extends RaftActor {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
         askProtocolEncountered(batched.getTransactionId());
 
@@ -760,6 +758,7 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleBatchedModifications(final BatchedModifications batched) {
         // This message is sent to prepare the modifications transaction directly on the Shard as an
         // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
@@ -777,7 +776,7 @@ public class Shard extends RaftActor {
         if (isLeader() && isLeaderActive) {
             handleBatchedModificationsLocal(batched, getSender());
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(batched, getSender(),
                         "Could not process BatchedModifications " + batched.getTransactionId());
@@ -786,9 +785,8 @@ public class Shard extends RaftActor {
                 // we need to reconstruct previous BatchedModifications from the transaction
                 // DataTreeModification, honoring the max batched modification count, and forward all the
                 // previous BatchedModifications to the new leader.
-                Collection<BatchedModifications> newModifications = commitCoordinator
-                        .createForwardedBatchedModifications(batched,
-                                datastoreContext.getShardBatchedModificationCount());
+                final var newModifications = commitCoordinator.createForwardedBatchedModifications(batched,
+                    datastoreContext.getShardBatchedModificationCount());
 
                 LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                         newModifications.size(), leader);
@@ -817,11 +815,12 @@ public class Shard extends RaftActor {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
-        final TransactionIdentifier txId = message.getTransactionId();
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
+        final var txId = message.getTransactionId();
         LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), txId);
 
-        boolean isLeaderActive = isLeaderActive();
+        final var isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
             askProtocolEncountered(txId);
             try {
@@ -831,7 +830,7 @@ public class Shard extends RaftActor {
                 getSender().tell(new Failure(e), getSelf());
             }
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(message, getSender(),
                         "Could not process ready local transaction " + txId);
@@ -843,22 +842,23 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
         LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());
 
-        boolean isLeaderActive = isLeaderActive();
+        final var isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
             askProtocolEncountered(forwardedReady.getTransactionId());
             commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                         "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
 
-                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
+                final var readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                         forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                         forwardedReady.getParticipatingShardNames());
                 readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
@@ -867,8 +867,9 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleAbortTransaction(final AbortTransaction abort) {
-        final TransactionIdentifier transactionId = abort.getTransactionId();
+        final var transactionId = abort.getTransactionId();
         askProtocolEncountered(transactionId);
         doAbortTransaction(transactionId, getSender());
     }
@@ -877,6 +878,7 @@ public class Shard extends RaftActor {
         commitCoordinator.handleAbort(transactionID, sender, this);
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCreateTransaction(final Object message) {
         if (isLeader()) {
             createTransaction(CreateTransaction.fromSerializable(message));
@@ -888,9 +890,10 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
         if (isLeader()) {
-            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
+            final var id = closeTransactionChain.getIdentifier();
             askProtocolEncountered(id.getClientId());
             store.closeTransactionChain(id);
         } else if (getLeader() != null) {
@@ -900,6 +903,7 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void createTransaction(final CreateTransaction createTransaction) {
         askProtocolEncountered(createTransaction.getTransactionId());
@@ -910,7 +914,7 @@ public class Shard extends RaftActor {
                 return;
             }
 
-            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
+            final var transactionActor = createTransaction(createTransaction.getTransactionType(),
                 createTransaction.getTransactionId());
 
             getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
@@ -920,6 +924,7 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
         LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
         return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
@@ -927,14 +932,16 @@ public class Shard extends RaftActor {
     }
 
     // Called on leader only
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void askProtocolEncountered(final TransactionIdentifier transactionId) {
         askProtocolEncountered(transactionId.getHistoryId().getClientId());
     }
 
     // Called on leader only
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void askProtocolEncountered(final ClientIdentifier clientId) {
-        final FrontendIdentifier frontend = clientId.getFrontendId();
-        final LeaderFrontendState state = knownFrontends.get(frontend);
+        final var frontend = clientId.getFrontendId();
+        final var state = knownFrontends.get(frontend);
         if (!(state instanceof LeaderFrontendState.Disabled)) {
             LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId);
             if (knownFrontends.isEmpty()) {
@@ -948,7 +955,7 @@ public class Shard extends RaftActor {
     }
 
     private void updateSchemaContext(final UpdateSchemaContext message) {
-        updateSchemaContext(message.getEffectiveModelContext());
+        updateSchemaContext(message.modelContext());
     }
 
     @VisibleForTesting
@@ -981,13 +988,13 @@ public class Shard extends RaftActor {
         restoreFromSnapshot = null;
 
         //notify shard manager
-        getContext().parent().tell(new ActorInitialized(), getSelf());
+        getContext().parent().tell(new ActorInitialized(getSelf()), ActorRef.noSender());
 
         // Being paranoid here - this method should only be called once but just in case...
         if (txCommitTimeoutCheckSchedule == null) {
             // Schedule a message to be periodically sent to check if the current in-progress
             // transaction should be expired and aborted.
-            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
+            final var period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
             txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                     period, period, getSelf(),
                     TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
@@ -996,14 +1003,14 @@ public class Shard extends RaftActor {
 
     @Override
     protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
-        if (data instanceof Payload) {
-            if (data instanceof DisableTrackingPayload) {
-                disableTracking((DisableTrackingPayload) data);
+        if (data instanceof Payload payload) {
+            if (payload instanceof DisableTrackingPayload disableTracking) {
+                disableTracking(disableTracking);
                 return;
             }
 
             try {
-                store.applyReplicatedPayload(identifier, (Payload)data);
+                store.applyReplicatedPayload(identifier, payload);
             } catch (DataValidationFailedException | IOException e) {
                 LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
             }
@@ -1092,10 +1099,8 @@ public class Shard extends RaftActor {
         paused = true;
 
         // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
-        if (datastoreContext.isUseTellBasedProtocol()) {
-            knownFrontends.values().forEach(LeaderFrontendState::retire);
-            knownFrontends = ImmutableMap.of();
-        }
+        knownFrontends.values().forEach(LeaderFrontendState::retire);
+        knownFrontends = ImmutableMap.of();
 
         store.setRunOnPendingTransactionsComplete(operation);
     }
@@ -1163,7 +1168,7 @@ public class Shard extends RaftActor {
         private ShardIdentifier id;
         private Map<String, String> peerAddresses = Collections.emptyMap();
         private DatastoreContext datastoreContext;
-        private EffectiveModelContextProvider schemaContextProvider;
+        private Supplier<@NonNull EffectiveModelContext> schemaContextProvider;
         private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
         private DataTree dataTree;
 
@@ -1184,37 +1189,37 @@ public class Shard extends RaftActor {
 
         public T id(final ShardIdentifier newId) {
             checkSealed();
-            this.id = newId;
+            id = newId;
             return self();
         }
 
         public T peerAddresses(final Map<String, String> newPeerAddresses) {
             checkSealed();
-            this.peerAddresses = newPeerAddresses;
+            peerAddresses = newPeerAddresses;
             return self();
         }
 
         public T datastoreContext(final DatastoreContext newDatastoreContext) {
             checkSealed();
-            this.datastoreContext = newDatastoreContext;
+            datastoreContext = newDatastoreContext;
             return self();
         }
 
-        public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) {
+        public T schemaContextProvider(final Supplier<@NonNull EffectiveModelContext> newSchemaContextProvider) {
             checkSealed();
-            this.schemaContextProvider = requireNonNull(newSchemaContextProvider);
+            schemaContextProvider = requireNonNull(newSchemaContextProvider);
             return self();
         }
 
         public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
             checkSealed();
-            this.restoreFromSnapshot = newRestoreFromSnapshot;
+            restoreFromSnapshot = newRestoreFromSnapshot;
             return self();
         }
 
         public T dataTree(final DataTree newDataTree) {
             checkSealed();
-            this.dataTree = newDataTree;
+            dataTree = newDataTree;
             return self();
         }
 
@@ -1231,7 +1236,7 @@ public class Shard extends RaftActor {
         }
 
         public EffectiveModelContext getSchemaContext() {
-            return verifyNotNull(schemaContextProvider.getEffectiveModelContext());
+            return verifyNotNull(schemaContextProvider.get());
         }
 
         public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
@@ -1243,15 +1248,10 @@ public class Shard extends RaftActor {
         }
 
         public TreeType getTreeType() {
-            switch (datastoreContext.getLogicalStoreType()) {
-                case CONFIGURATION:
-                    return TreeType.CONFIGURATION;
-                case OPERATIONAL:
-                    return TreeType.OPERATIONAL;
-                default:
-                    throw new IllegalStateException("Unhandled logical store type "
-                            + datastoreContext.getLogicalStoreType());
-            }
+            return switch (datastoreContext.getLogicalStoreType()) {
+                case CONFIGURATION -> TreeType.CONFIGURATION;
+                case OPERATIONAL -> TreeType.OPERATIONAL;
+            };
         }
 
         protected void verify() {
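
The Shard.java hunks above mostly convert switch statements into switch expressions and replace instanceof-plus-cast chains with pattern matching for instanceof. The following is a minimal standalone sketch of those two idioms for readers unfamiliar with them; the class, enum and method names here are hypothetical and are not taken from this commit:

public final class SwitchAndPatternsSketch {
    enum ExportOnRecovery { Json, Off }

    // Switch expression over an exhaustive enum: no 'default' arm and no 'break' fall-through.
    static String exportTarget(final ExportOnRecovery mode) {
        return switch (mode) {
            case Json -> "json-export-actor";
            case Off -> null;
        };
    }

    // Pattern matching for instanceof: the type test, the cast and the binding happen in one step.
    static String describe(final Object message) {
        if (message instanceof String request) {
            return "String of length " + request.length();
        } else if (message instanceof Integer count) {
            return "Integer with value " + count;
        }
        return "unhandled: " + message;
    }

    public static void main(final String[] args) {
        System.out.println(exportTarget(ExportOnRecovery.Json)); // json-export-actor
        System.out.println(describe("hello"));                   // String of length 5
    }
}
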
index d9520c5d5c6596ab8c9a9a7a626d3b1ebe657c71..946203b6b76aa5e2c4b4f94a849a9430f2d3fa06 100644 (file)
@@ -15,11 +15,11 @@ import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Deque;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.Map;
@@ -38,7 +38,8 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionRe
 import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
 import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 
 /**
@@ -46,6 +47,7 @@ import org.slf4j.Logger;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 final class ShardCommitCoordinator {
 
     // Interface hook for unit tests to replace or decorate the ShardDataTreeCohorts.
@@ -128,6 +130,7 @@ final class ShardCommitCoordinator {
      * @param batched the BatchedModifications message to process
      * @param sender the sender of the message
      */
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of captured failure")
     void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) {
         CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId());
         if (cohortEntry == null || cohortEntry.isSealed()) {
@@ -204,6 +207,7 @@ final class ShardCommitCoordinator {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
             final int maxModificationsPerBatch) {
         CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId());
@@ -236,9 +240,9 @@ final class ShardCommitCoordinator {
     }
 
     private void handleCanCommit(final CohortEntry cohortEntry) {
-        cohortEntry.canCommit(new FutureCallback<Void>() {
+        cohortEntry.canCommit(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final Empty result) {
                 log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId());
 
                 if (cohortEntry.isDoImmediateCommit()) {
@@ -371,9 +375,9 @@ final class ShardCommitCoordinator {
         log.debug("{}: Aborting transaction {}", name, transactionID);
 
         final ActorRef self = shard.getSelf();
-        cohortEntry.abort(new FutureCallback<Void>() {
+        cohortEntry.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final Empty result) {
                 if (sender != null) {
                     sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
                 }
@@ -397,19 +401,18 @@ final class ShardCommitCoordinator {
     }
 
     void abortPendingTransactions(final String reason, final Shard shard) {
-        final Failure failure = new Failure(new RuntimeException(reason));
-        Collection<ShardDataTreeCohort> pending = dataTree.getAndClearPendingTransactions();
+        final var failure = new Failure(new RuntimeException(reason));
+        final var pending = dataTree.getAndClearPendingTransactions();
 
         log.debug("{}: Aborting {} pending queued transactions", name, pending.size());
 
-        for (ShardDataTreeCohort cohort : pending) {
-            CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
-            if (cohortEntry == null) {
-                continue;
-            }
-
-            if (cohortEntry.getReplySender() != null) {
-                cohortEntry.getReplySender().tell(failure, shard.self());
+        for (var cohort : pending) {
+            final var cohortEntry = cohortCache.remove(cohort.transactionId());
+            if (cohortEntry != null) {
+                final var replySender = cohortEntry.getReplySender();
+                if (replySender != null) {
+                    replySender.tell(failure, shard.self());
+                }
             }
         }
 
@@ -417,32 +420,31 @@ final class ShardCommitCoordinator {
     }
 
     Collection<?> convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
-        final Collection<VersionedExternalizableMessage> messages = new ArrayList<>();
-        for (ShardDataTreeCohort cohort : dataTree.getAndClearPendingTransactions()) {
-            CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
+        final var messages = new ArrayList<VersionedExternalizableMessage>();
+        for (var cohort : dataTree.getAndClearPendingTransactions()) {
+            final var cohortEntry = cohortCache.remove(cohort.transactionId());
             if (cohortEntry == null) {
                 continue;
             }
 
-            final Deque<BatchedModifications> newMessages = new ArrayDeque<>();
+            final var newMessages = new ArrayDeque<BatchedModifications>();
             cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
                 @Override
                 protected BatchedModifications getModifications() {
-                    final BatchedModifications lastBatch = newMessages.peekLast();
-
+                    final var lastBatch = newMessages.peekLast();
                     if (lastBatch != null && lastBatch.getModifications().size() >= maxModificationsPerBatch) {
                         return lastBatch;
                     }
 
                     // Allocate a new message
-                    final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(),
+                    final var ret = new BatchedModifications(cohortEntry.getTransactionId(),
                         cohortEntry.getClientVersion());
                     newMessages.add(ret);
                     return ret;
                 }
             });
 
-            final BatchedModifications last = newMessages.peekLast();
+            final var last = newMessages.peekLast();
             if (last != null) {
                 final boolean immediate = cohortEntry.isDoImmediateCommit();
                 last.setDoCommitOnReady(immediate);
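
The ShardCommitCoordinator.java hunks above change the canCommit and abort callbacks from FutureCallback<Void> to FutureCallback<Empty> and rely on the Java 9+ diamond on anonymous classes. The following is a minimal standalone Guava sketch of that callback shape, using only plain Guava types; the Result record and all names here are hypothetical and not part of the commit:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

public final class CallbackSketch {
    record Result(String name) { }

    public static void main(final String[] args) {
        final SettableFuture<Result> future = SettableFuture.create();

        // Diamond on an anonymous class: the type argument Result is inferred from the target type.
        final FutureCallback<Result> callback = new FutureCallback<>() {
            @Override
            public void onSuccess(final Result result) {
                System.out.println("canCommit-style success for " + result.name());
            }

            @Override
            public void onFailure(final Throwable cause) {
                System.err.println("canCommit-style failure: " + cause);
            }
        };

        Futures.addCallback(future, callback, MoreExecutors.directExecutor());
        future.set(new Result("member-1-shard-default-config"));
    }
}
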
index 4aa7a7b786b6a0b925c4233a9043db59cb2be355..72e7a545a7e36a41ea28f79e2b10e5dbfbb6c8e7 100644 (file)
@@ -35,7 +35,6 @@ import java.util.Deque;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.OptionalLong;
 import java.util.Queue;
@@ -57,7 +56,6 @@ import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifia
 import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
 import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
@@ -70,31 +68,31 @@ import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationO
 import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
 import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
@@ -120,7 +118,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
         @Override
         public String toString() {
-            return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
+            return "CommitEntry [tx=" + cohort.transactionId() + ", state=" + cohort.getState() + "]";
         }
     }
 
@@ -161,7 +159,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      */
     private DataTreeTip tip;
 
-    private SchemaContext schemaContext;
+    private EffectiveModelContext schemaContext;
     private DataSchemaContextTree dataSchemaContext;
 
     private int currentTransactionBatch;
@@ -199,7 +197,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @VisibleForTesting
     public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) {
-        this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(),
+        this(shard, schemaContext, treeType, YangInstanceIdentifier.of(),
                 new DefaultShardDataTreeChangeListenerPublisher(""), "");
     }
 
@@ -216,7 +214,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @VisibleForTesting
-    final SchemaContext getSchemaContext() {
+    final EffectiveModelContext getSchemaContext() {
         return schemaContext;
     }
 
@@ -236,7 +234,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @return A state snapshot
      */
     @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
-        final NormalizedNode rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
+        final NormalizedNode rootNode = takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
         final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
                 ImmutableMap.builder();
 
@@ -263,14 +261,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
 
         final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
-        if (snapshot instanceof MetadataShardDataTreeSnapshot) {
-            snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
+        if (snapshot instanceof MetadataShardDataTreeSnapshot metaSnapshot) {
+            snapshotMeta = metaSnapshot.getMetadata();
         } else {
             snapshotMeta = ImmutableMap.of();
         }
 
-        for (ShardDataTreeMetadata<?> m : metadata) {
-            final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
+        for (var m : metadata) {
+            final var s = snapshotMeta.get(m.getSupportedType());
             if (s != null) {
                 m.applySnapshot(s);
             } else {
@@ -278,16 +276,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             }
         }
 
-        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
+        final DataTreeModification unwrapped = newModification();
         final DataTreeModification mod = wrapper.apply(unwrapped);
         // delete everything first
-        mod.delete(YangInstanceIdentifier.empty());
+        mod.delete(YangInstanceIdentifier.of());
 
-        final Optional<NormalizedNode> maybeNode = snapshot.getRootNode();
-        if (maybeNode.isPresent()) {
+        snapshot.getRootNode().ifPresent(rootNode -> {
             // Add everything from the remote node back
-            mod.write(YangInstanceIdentifier.empty(), maybeNode.get());
-        }
+            mod.write(YangInstanceIdentifier.of(), rootNode);
+        });
+
         mod.ready();
 
         dataTree.validate(unwrapped);
@@ -333,35 +331,35 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
-        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.acquireCandidate();
-        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
-        final PruningDataTreeModification mod = createPruningModification(unwrapped,
-            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.getValue().getVersion()) > 0);
+        final var entry = payload.acquireCandidate();
+        final var unwrapped = newModification();
+        final var pruningMod = createPruningModification(unwrapped,
+            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.streamVersion()) > 0);
 
-        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
-        mod.ready();
+        DataTreeCandidates.applyToModification(pruningMod, entry.candidate());
+        pruningMod.ready();
         LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);
 
         try {
             dataTree.validate(unwrapped);
             dataTree.commit(dataTree.prepare(unwrapped));
         } catch (Exception e) {
-            File file = new File(System.getProperty("karaf.data", "."),
+            final var file = new File(System.getProperty("karaf.data", "."),
                     "failed-recovery-payload-" + logContext + ".out");
             DataTreeModificationOutput.toFile(file, unwrapped);
-            throw new IllegalStateException(String.format(
-                    "%s: Failed to apply recovery payload. Modification data was written to file %s",
-                    logContext, file), e);
+            throw new IllegalStateException(
+                "%s: Failed to apply recovery payload. Modification data was written to file %s".formatted(
+                    logContext, file),
+                e);
         }
 
-        allMetadataCommittedTransaction(entry.getKey());
+        allMetadataCommittedTransaction(entry.transactionId());
     }
 
     private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
             final boolean uintAdapting) {
         // TODO: we should be able to reuse the pruner, provided we are not reentrant
-        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
-            dataSchemaContext);
+        final var pruner = ReusableNormalizedNodePruner.forDataSchemaContext(dataSchemaContext);
         return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
                 : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
     }
@@ -375,20 +373,20 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
     final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
-        if (payload instanceof CommitTransactionPayload) {
-            applyRecoveryCandidate((CommitTransactionPayload) payload);
-        } else if (payload instanceof AbortTransactionPayload) {
-            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeTransactionPayload) {
-            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof CreateLocalHistoryPayload) {
-            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof CloseLocalHistoryPayload) {
-            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeLocalHistoryPayload) {
-            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof SkipTransactionsPayload) {
-            allMetadataSkipTransactions((SkipTransactionsPayload) payload);
+        if (payload instanceof CommitTransactionPayload commit) {
+            applyRecoveryCandidate(commit);
+        } else if (payload instanceof AbortTransactionPayload abort) {
+            allMetadataAbortedTransaction(abort.getIdentifier());
+        } else if (payload instanceof PurgeTransactionPayload purge) {
+            allMetadataPurgedTransaction(purge.getIdentifier());
+        } else if (payload instanceof CreateLocalHistoryPayload create) {
+            allMetadataCreatedLocalHistory(create.getIdentifier());
+        } else if (payload instanceof CloseLocalHistoryPayload close) {
+            allMetadataClosedLocalHistory(close.getIdentifier());
+        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
+            allMetadataPurgedLocalHistory(purge.getIdentifier());
+        } else if (payload instanceof SkipTransactionsPayload skip) {
+            allMetadataSkipTransactions(skip);
         } else {
             LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
         }
@@ -396,21 +394,21 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     private void applyReplicatedCandidate(final CommitTransactionPayload payload)
             throws DataValidationFailedException, IOException {
-        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.acquireCandidate();
-        final TransactionIdentifier identifier = entry.getKey();
-        LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);
+        final var payloadCandidate = payload.acquireCandidate();
+        final var transactionId = payloadCandidate.transactionId();
+        LOG.debug("{}: Applying foreign transaction {}", logContext, transactionId);
 
-        final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+        final var mod = newModification();
         // TODO: check version here, which will enable us to perform forward-compatibility transformations
-        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
+        DataTreeCandidates.applyToModification(mod, payloadCandidate.candidate());
         mod.ready();
 
         LOG.trace("{}: Applying foreign modification {}", logContext, mod);
         dataTree.validate(mod);
-        final DataTreeCandidate candidate = dataTree.prepare(mod);
+        final var candidate = dataTree.prepare(mod);
         dataTree.commit(candidate);
 
-        allMetadataCommittedTransaction(identifier);
+        allMetadataCommittedTransaction(transactionId);
         notifyListeners(candidate);
     }
 
@@ -436,52 +434,51 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
          * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
          * pre-Boron state -- which limits the number of options here.
          */
-        if (payload instanceof CommitTransactionPayload) {
+        if (payload instanceof CommitTransactionPayload commit) {
             if (identifier == null) {
-                applyReplicatedCandidate((CommitTransactionPayload) payload);
+                applyReplicatedCandidate(commit);
             } else {
                 verify(identifier instanceof TransactionIdentifier);
                 // if we did not track this transaction before, it means that it came from another leader and we are in
                // the process of committing it while in PreLeader state. That means that it hasn't yet been committed to
                 // the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
                 if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
-                    applyReplicatedCandidate((CommitTransactionPayload) payload);
+                    applyReplicatedCandidate(commit);
                 }
             }
 
             // make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
-            checkRootOverwrite(((CommitTransactionPayload) payload).acquireCandidate().getValue()
-                    .getCandidate());
-        } else if (payload instanceof AbortTransactionPayload) {
+            checkRootOverwrite(commit.acquireCandidate().candidate());
+        } else if (payload instanceof AbortTransactionPayload abort) {
             if (identifier != null) {
-                payloadReplicationComplete((AbortTransactionPayload) payload);
+                payloadReplicationComplete(abort);
             }
-            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeTransactionPayload) {
+            allMetadataAbortedTransaction(abort.getIdentifier());
+        } else if (payload instanceof PurgeTransactionPayload purge) {
             if (identifier != null) {
-                payloadReplicationComplete((PurgeTransactionPayload) payload);
+                payloadReplicationComplete(purge);
             }
-            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof CloseLocalHistoryPayload) {
+            allMetadataPurgedTransaction(purge.getIdentifier());
+        } else if (payload instanceof CloseLocalHistoryPayload close) {
             if (identifier != null) {
-                payloadReplicationComplete((CloseLocalHistoryPayload) payload);
+                payloadReplicationComplete(close);
             }
-            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof CreateLocalHistoryPayload) {
+            allMetadataClosedLocalHistory(close.getIdentifier());
+        } else if (payload instanceof CreateLocalHistoryPayload create) {
             if (identifier != null) {
-                payloadReplicationComplete((CreateLocalHistoryPayload)payload);
+                payloadReplicationComplete(create);
             }
-            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeLocalHistoryPayload) {
+            allMetadataCreatedLocalHistory(create.getIdentifier());
+        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
             if (identifier != null) {
-                payloadReplicationComplete((PurgeLocalHistoryPayload)payload);
+                payloadReplicationComplete(purge);
             }
-            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof SkipTransactionsPayload) {
+            allMetadataPurgedLocalHistory(purge.getIdentifier());
+        } else if (payload instanceof SkipTransactionsPayload skip) {
             if (identifier != null) {
-                payloadReplicationComplete((SkipTransactionsPayload)payload);
+                payloadReplicationComplete(skip);
             }
-            allMetadataSkipTransactions((SkipTransactionsPayload) payload);
+            allMetadataSkipTransactions(skip);
         } else {
             LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
         }
@@ -499,8 +496,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
 
         // top level container ie "/"
-        if (candidate.getRootPath().isEmpty()
-                && candidate.getRootNode().getModificationType() == ModificationType.WRITE) {
+        if (candidate.getRootPath().isEmpty() && candidate.getRootNode().modificationType() == ModificationType.WRITE) {
             LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext);
             shard.self().tell(new InitiateCaptureSnapshot(), noSender());
         }
@@ -524,16 +520,17 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     private boolean payloadReplicationComplete(final TransactionIdentifier txId) {
-        final CommitEntry current = pendingFinishCommits.peek();
+        final var current = pendingFinishCommits.peek();
         if (current == null) {
             LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
             allMetadataCommittedTransaction(txId);
             return false;
         }
 
-        if (!current.cohort.getIdentifier().equals(txId)) {
+        final var cohortTxId = current.cohort.transactionId();
+        if (!cohortTxId.equals(txId)) {
             LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
-                current.cohort.getIdentifier(), txId);
+                cohortTxId, txId);
             allMetadataCommittedTransaction(txId);
             return false;
         }
@@ -617,25 +614,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return chain;
     }
 
-    final ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+    final @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
         shard.getShardMBean().incrementReadOnlyTransactionCount();
 
-        if (txId.getHistoryId().getHistoryId() == 0) {
-            return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
-        }
+        final var historyId = txId.getHistoryId();
+        return historyId.getHistoryId() == 0 ? newStandaloneReadOnlyTransaction(txId)
+            : ensureTransactionChain(historyId, null).newReadOnlyTransaction(txId);
+    }
 
-        return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
+    final @NonNull ReadOnlyShardDataTreeTransaction newStandaloneReadOnlyTransaction(final TransactionIdentifier txId) {
+        return new ReadOnlyShardDataTreeTransaction(this, txId, takeSnapshot());
     }
 
-    final ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+    final @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
         shard.getShardMBean().incrementReadWriteTransactionCount();
 
-        if (txId.getHistoryId().getHistoryId() == 0) {
-            return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
-                    .newModification());
-        }
+        final var historyId = txId.getHistoryId();
+        return historyId.getHistoryId() == 0 ? newStandaloneReadWriteTransaction(txId)
+            : ensureTransactionChain(historyId, null).newReadWriteTransaction(txId);
+    }
 
-        return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
+    final @NonNull ReadWriteShardDataTreeTransaction newStandaloneReadWriteTransaction(
+            final TransactionIdentifier txId) {
+        return new ReadWriteShardDataTreeTransaction(this, txId, newModification());
     }
 
     @VisibleForTesting
@@ -728,13 +729,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     final Optional<DataTreeCandidate> readCurrentData() {
-        return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty())
-                .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state));
+        return readNode(YangInstanceIdentifier.of())
+            .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), state));
     }
 
     final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
-            final Optional<DataTreeCandidate> initialState,
-            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+            final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
         treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
     }
 
@@ -775,7 +775,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @VisibleForTesting
     public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
-        return dataTree.takeSnapshot().readNode(path);
+        return takeSnapshot().readNode(path);
     }
 
     final DataTreeSnapshot takeSnapshot() {
@@ -784,7 +784,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @VisibleForTesting
     final DataTreeModification newModification() {
-        return dataTree.takeSnapshot().newModification();
+        return takeSnapshot().newModification();
     }
 
     final Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
@@ -830,25 +830,25 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             final SimpleShardDataTreeCohort cohort = entry.cohort;
             final DataTreeModification modification = cohort.getDataTreeModification();
 
-            LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
+            LOG.debug("{}: Validating transaction {}", logContext, cohort.transactionId());
             Exception cause;
             try {
                 tip.validate(modification);
-                LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Transaction {} validated", logContext, cohort.transactionId());
                 cohort.successfulCanCommit();
                 entry.lastAccess = readTime();
                 return;
             } catch (ConflictingModificationAppliedException e) {
-                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
+                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.transactionId(),
                     e.getPath());
                 cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
             } catch (DataValidationFailedException e) {
-                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
+                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.transactionId(),
                     e.getPath(), e);
 
                 // For debugging purposes, allow dumping of the modification. Coupled with the above
                 // precondition log, it should allow us to understand what went on.
-                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
+                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.transactionId(), modification);
                 LOG.trace("{}: Current tree: {}", logContext, dataTree);
                 cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
             } catch (Exception e) {
@@ -873,7 +873,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             final SimpleShardDataTreeCohort cohort = entry.cohort;
 
             if (cohort.isFailed()) {
-                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.transactionId());
                 queue.remove();
                 continue;
             }
@@ -919,12 +919,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
             Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
             if (precedingShardNames.isEmpty()) {
-                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.transactionId());
                 return;
             }
 
             LOG.debug("{}: Evaluating tx {} for canCommit -  preceding participating shard names {}",
-                    logContext, cohort.getIdentifier(), precedingShardNames);
+                    logContext, cohort.transactionId(), precedingShardNames);
             final Iterator<CommitEntry> iter = pendingTransactions.iterator();
             int index = -1;
             int moveToIndex = -1;
@@ -935,29 +935,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 if (cohort.equals(entry.cohort)) {
                     if (moveToIndex < 0) {
                         LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
-                                logContext, cohort.getIdentifier());
+                                logContext, cohort.transactionId());
                         return;
                     }
 
                     LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
-                            logContext, cohort.getIdentifier(), moveToIndex);
+                            logContext, cohort.transactionId(), moveToIndex);
                     iter.remove();
                     insertEntry(pendingTransactions, entry, moveToIndex);
 
                     if (!cohort.equals(pendingTransactions.peek().cohort)) {
                         LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
-                                logContext, cohort.getIdentifier());
+                                logContext, cohort.transactionId());
                         return;
                     }
 
                     LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
-                            logContext, cohort.getIdentifier());
+                            logContext, cohort.transactionId());
                     break;
                 }
 
                 if (entry.cohort.getState() != State.READY) {
                     LOG.debug("{}: Skipping pending transaction {} in state {}",
-                            logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
+                            logContext, entry.cohort.transactionId(), entry.cohort.getState());
                     continue;
                 }
 
@@ -967,16 +967,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                     if (moveToIndex < 0) {
                         LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
-                                logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
+                                logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), index);
                         moveToIndex = index;
                     } else {
                         LOG.debug(
                             "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
-                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
+                            logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), moveToIndex);
                     }
                 } else {
                     LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
-                        logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
+                        logContext, pendingPrecedingShardNames, entry.cohort.transactionId());
                 }
             }
         }
@@ -1022,7 +1022,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final SimpleShardDataTreeCohort current = entry.cohort;
         verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
 
-        final TransactionIdentifier currentId = current.getIdentifier();
+        final TransactionIdentifier currentId = current.transactionId();
         LOG.debug("{}: Preparing transaction {}", logContext, currentId);
 
         final DataTreeCandidateTip candidate;
@@ -1034,9 +1034,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return;
         }
 
-        cohort.userPreCommit(candidate, new FutureCallback<Void>() {
+        cohort.userPreCommit(candidate, new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void noop) {
+            public void onSuccess(final Empty result) {
                 // Set the tip of the data tree.
                 tip = verifyNotNull(candidate);
 
@@ -1067,7 +1067,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void finishCommit(final SimpleShardDataTreeCohort cohort) {
-        final TransactionIdentifier txId = cohort.getIdentifier();
+        final TransactionIdentifier txId = cohort.transactionId();
         final DataTreeCandidate candidate = cohort.getCandidate();
 
         LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);
@@ -1105,13 +1105,13 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
         final SimpleShardDataTreeCohort current = entry.cohort;
         if (!cohort.equals(current)) {
-            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
+            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.transactionId());
             return;
         }
 
-        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());
+        LOG.debug("{}: Starting commit for transaction {}", logContext, current.transactionId());
 
-        final TransactionIdentifier txId = cohort.getIdentifier();
+        final TransactionIdentifier txId = cohort.transactionId();
         final Payload payload;
         try {
             payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
@@ -1183,11 +1183,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     // the newReadWriteTransaction()
     final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
             final Optional<SortedSet<String>> participatingShardNames) {
-        if (txId.getHistoryId().getHistoryId() == 0) {
+        final var historyId = txId.getHistoryId();
+        if (historyId.getHistoryId() == 0) {
             return createReadyCohort(txId, mod, participatingShardNames);
         }
-
-        return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
+        return ensureTransactionChain(historyId, null).createReadyCohort(txId, mod, participatingShardNames);
     }
 
     @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
@@ -1212,11 +1212,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
         final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
         if (updateOpt.isPresent()) {
-            final long newAccess =  updateOpt.getAsLong();
+            final long newAccess =  updateOpt.orElseThrow();
             final long newDelta = now - newAccess;
             if (newDelta < delta) {
                 LOG.debug("{}: Updated current transaction {} access time", logContext,
-                    currentTx.cohort.getIdentifier());
+                    currentTx.cohort.transactionId());
                 currentTx.lastAccess = newAccess;
                 delta = newDelta;
             }
@@ -1231,7 +1231,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final State state = currentTx.cohort.getState();
 
         LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
-            currentTx.cohort.getIdentifier(), deltaMillis, state);
+            currentTx.cohort.transactionId(), deltaMillis, state);
         boolean processNext = true;
         final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
                 + deltaMillis + "ms");
@@ -1271,7 +1271,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 break;
             case COMMIT_PENDING:
                 LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
-                    currentTx.cohort.getIdentifier());
+                    currentTx.cohort.transactionId());
                 currentTx.lastAccess = now;
                 processNext = false;
                 return;
@@ -1295,7 +1295,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
                 pendingTransactions).iterator();
         if (!it.hasNext()) {
-            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
+            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.transactionId());
             return true;
         }
 
@@ -1303,8 +1303,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final CommitEntry first = it.next();
         if (cohort.equals(first.cohort)) {
             if (cohort.getState() != State.COMMIT_PENDING) {
-                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
-                    cohort.getIdentifier());
+                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.transactionId(),
+                    cohort.transactionId());
 
                 it.remove();
                 if (cohort.getCandidate() != null) {
@@ -1315,7 +1315,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 return true;
             }
 
-            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
+            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.transactionId());
             return false;
         }
 
@@ -1323,7 +1323,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         while (it.hasNext()) {
             final CommitEntry e = it.next();
             if (cohort.equals(e.cohort)) {
-                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.transactionId());
 
                 it.remove();
                 if (cohort.getCandidate() != null) {
@@ -1331,12 +1331,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 }
 
                 return true;
-            } else {
-                newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
             }
+
+            newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
         }
 
-        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
+        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.transactionId());
         return true;
     }
 
@@ -1346,16 +1346,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         while (iter.hasNext()) {
             final SimpleShardDataTreeCohort cohort = iter.next().cohort;
             if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
-                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.transactionId());
 
                 try {
                     tip.validate(cohort.getDataTreeModification());
                 } catch (DataValidationFailedException | RuntimeException e) {
-                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
+                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.transactionId(), e);
                     cohort.reportFailure(e);
                 }
             } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
-                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.transactionId());
 
                 try {
                     tip.validate(cohort.getDataTreeModification());
@@ -1364,7 +1364,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                     cohort.setNewCandidate(candidate);
                     tip = candidate;
                 } catch (RuntimeException | DataValidationFailedException e) {
-                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
+                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.transactionId(), e);
                     cohort.reportFailure(e);
                 }
             }
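
The hunk above also swaps OptionalLong.getAsLong() for orElseThrow(). A minimal, JDK-only sketch of why the two accessors are interchangeable here; AccessTimeSketch and refreshedAccess are illustrative names, not controller code:

    import java.util.OptionalLong;

    // Minimal sketch: for a present value OptionalLong.orElseThrow() behaves exactly
    // like getAsLong(); both throw NoSuchElementException when the value is absent,
    // so the isPresent() guard used in the hunk above is still required.
    class AccessTimeSketch {
        static long refreshedAccess(final OptionalLong updateOpt, final long previous) {
            return updateOpt.isPresent() ? updateOpt.orElseThrow() : previous;
        }

        public static void main(final String[] args) {
            System.out.println(refreshedAccess(OptionalLong.of(42L), -1L)); // 42
            System.out.println(refreshedAccess(OptionalLong.empty(), -1L)); // -1
        }
    }
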
index bfeb91f65b92ecbeb12f1aa5eb937b7a65cb4e2e..4c67c3fc23b4bfa2036b207f5704eae400cfa1f5 100644 (file)
@@ -10,9 +10,9 @@ package org.opendaylight.controller.cluster.datastore;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Interface for a class that generates and publishes notifications for DataTreeChangeListeners.
@@ -21,6 +21,5 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
  */
 interface ShardDataTreeChangeListenerPublisher extends ShardDataTreeNotificationPublisher {
     void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
-            Optional<DataTreeCandidate> initialState,
-            Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration);
+            Optional<DataTreeCandidate> initialState, Consumer<Registration> onRegistration);
 }
index d737b55437ff9af482272313ccd2aafdcb8952ae..83209731c4abfd59b5b090eb4719a1c4894b9707 100644 (file)
@@ -13,9 +13,9 @@ import akka.actor.Props;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Implementation of ShardDataTreeChangeListenerPublisher that offloads the generation and publication of data tree
@@ -34,7 +34,7 @@ class ShardDataTreeChangeListenerPublisherActorProxy extends AbstractShardDataTr
     @Override
     public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
             final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> currentState,
-            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+            final Consumer<Registration> onRegistration) {
         final ShardDataTreeChangePublisherActor.RegisterListener regMessage =
                 new ShardDataTreeChangePublisherActor.RegisterListener(treeId, listener, currentState, onRegistration);
         log.debug("{}: Sending {} to publisher actor {}", logContext(), regMessage, publisherActor());
index e099d0bc925096ae50558bbdf8e758cbb97619a4..5e96133aaa116cda6d68a3a5f42bb0551d524026 100644 (file)
@@ -13,9 +13,9 @@ import akka.actor.Props;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Actor used to generate and publish DataTreeChange notifications.
@@ -31,12 +31,11 @@ public final class ShardDataTreeChangePublisherActor
 
     @Override
     protected void handleReceive(final Object message) {
-        if (message instanceof RegisterListener) {
-            RegisterListener reg = (RegisterListener)message;
+        if (message instanceof RegisterListener reg) {
             LOG.debug("{}: Received {}", logContext(), reg);
             if (reg.initialState.isPresent()) {
                 DefaultShardDataTreeChangeListenerPublisher.notifySingleListener(reg.path, reg.listener,
-                        reg.initialState.get(), logContext());
+                        reg.initialState.orElseThrow(), logContext());
             } else {
                 reg.listener.onInitialData();
             }
@@ -55,11 +54,10 @@ public final class ShardDataTreeChangePublisherActor
         private final YangInstanceIdentifier path;
         private final DOMDataTreeChangeListener listener;
         private final Optional<DataTreeCandidate> initialState;
-        private final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration;
+        private final Consumer<Registration> onRegistration;
 
         RegisterListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
-                final Optional<DataTreeCandidate> initialState,
-                final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+                final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
             this.path = requireNonNull(path);
             this.listener = requireNonNull(listener);
             this.initialState = requireNonNull(initialState);
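
The handleReceive() change above combines pattern-matching instanceof with Optional.orElseThrow(). A minimal sketch of that shape, assuming only the JDK; RegisterListener here is a stand-in record, not the actual actor message:

    import java.util.Optional;

    // Minimal sketch: the instanceof pattern binds the cast in the condition, and
    // orElseThrow() replaces get() on the already-checked initial state.
    class ReceiveSketch {
        record RegisterListener(Optional<String> initialState) { }

        static void handleReceive(final Object message) {
            if (message instanceof RegisterListener reg) {
                if (reg.initialState().isPresent()) {
                    System.out.println("initial state: " + reg.initialState().orElseThrow());
                } else {
                    System.out.println("no initial data");
                }
            } else {
                System.out.println("unhandled: " + message);
            }
        }

        public static void main(final String[] args) {
            handleReceive(new RegisterListener(Optional.of("candidate")));
            handleReceive(new RegisterListener(Optional.empty()));
            handleReceive("something else");
        }
    }
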
index 581768c0ed73352d5c1b80fc5e32033dfe266cf2..03cc77f0e064ff34880a5c1ae664572435588344 100644 (file)
@@ -14,14 +14,15 @@ import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 @VisibleForTesting
-public abstract class ShardDataTreeCohort implements Identifiable<TransactionIdentifier> {
+public abstract class ShardDataTreeCohort {
     public enum State {
         READY,
         CAN_COMMIT_PENDING,
@@ -39,6 +40,8 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
         // Prevent foreign instantiation
     }
 
+    abstract @NonNull TransactionIdentifier transactionId();
+
     // FIXME: This leaks internal state generated in preCommit,
     // should be result of canCommit
     abstract DataTreeCandidateTip getCandidate();
@@ -49,13 +52,13 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
 
     // FIXME: Should return rebased DataTreeCandidateTip
     @VisibleForTesting
-    public abstract void canCommit(FutureCallback<Void> callback);
+    public abstract void canCommit(FutureCallback<Empty> callback);
 
     @VisibleForTesting
     public abstract void preCommit(FutureCallback<DataTreeCandidate> callback);
 
     @VisibleForTesting
-    public abstract void abort(FutureCallback<Void> callback);
+    public abstract void abort(FutureCallback<Empty> callback);
 
     @VisibleForTesting
     public abstract void commit(FutureCallback<UnsignedLong> callback);
@@ -70,6 +73,6 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
     }
 
     ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("id", getIdentifier()).add("state", getState());
+        return toStringHelper.add("id", transactionId()).add("state", getState());
     }
 }
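
The cohort API above now takes FutureCallback&lt;Empty&gt; rather than FutureCallback&lt;Void&gt;, so callers report success with Empty.value() instead of null. A minimal sketch of that calling convention, assuming Guava and yangtools yang-common on the classpath; the Cohort interface is a stand-in, not the real ShardDataTreeCohort:

    import com.google.common.util.concurrent.FutureCallback;
    import org.opendaylight.yangtools.yang.common.Empty;

    // Minimal sketch: success now carries an explicit Empty value instead of null.
    class CanCommitSketch {
        interface Cohort {
            void canCommit(FutureCallback<Empty> callback);
        }

        static void canCommitAndLog(final Cohort cohort) {
            cohort.canCommit(new FutureCallback<>() {
                @Override
                public void onSuccess(final Empty result) {
                    System.out.println("canCommit succeeded: " + result);
                }

                @Override
                public void onFailure(final Throwable cause) {
                    System.out.println("canCommit failed: " + cause);
                }
            });
        }

        public static void main(final String[] args) {
            // Trivial cohort that reports success immediately.
            canCommitAndLog(callback -> callback.onSuccess(Empty.value()));
        }
    }
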
index 060c28ae1d44385b6e54dc21b60b4031f53233dd..cab0c534ac90819e48b93ed2c26a9264f0c2ba20 100644 (file)
@@ -58,7 +58,7 @@ final class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implement
             return stateCache.get();
         } catch (Exception e) {
             Throwables.throwIfUnchecked(e);
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -67,7 +67,7 @@ final class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implement
     private static List<DataTreeListenerInfo> getListenerActorsInfo(final Collection<ActorSelection> actors) {
         final Timeout timeout = new Timeout(20, TimeUnit.SECONDS);
         final List<Future<Object>> futureList = new ArrayList<>(actors.size());
-        for (ActorSelection actor: actors) {
+        for (ActorSelection actor : actors) {
             futureList.add(Patterns.ask(actor, GetInfo.INSTANCE, timeout));
         }
 
index 4dbd818488c29243116f191469f959d6eb3a42ed..373a130004521d835430da24db5d3b1d875dd62e 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Interface for a class the publishes data tree notifications.
index c22bc3bd98930c2e561a7b4f9b06083da1dfb8d0..095a542f6c69417b03b9d17f78dc1e9db5d17e4c 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import com.google.common.base.Stopwatch;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Actor used to generate and publish data tree notifications. This is used to offload the potentially
@@ -40,7 +40,7 @@ public class ShardDataTreeNotificationPublisherActor<T extends ShardDataTreeNoti
     }
 
     @Override
-    protected void handleReceive(Object message) {
+    protected void handleReceive(final Object message) {
         if (message instanceof PublishNotifications) {
             PublishNotifications toPublish = (PublishNotifications)message;
             timer.start();
@@ -65,7 +65,7 @@ public class ShardDataTreeNotificationPublisherActor<T extends ShardDataTreeNoti
     static class PublishNotifications {
         private final DataTreeCandidate candidate;
 
-        PublishNotifications(DataTreeCandidate candidate) {
+        PublishNotifications(final DataTreeCandidate candidate) {
             this.candidate = candidate;
         }
     }
index 7917ae0177f6a9f737cd59b04d314e67b841c95e..6be13ae1295de42282ac34b0713e8bb1ed27e662 100644 (file)
@@ -13,11 +13,12 @@ import static java.util.Objects.requireNonNull;
 import com.google.common.base.MoreObjects;
 import java.util.Optional;
 import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,7 +37,7 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
     private boolean closed;
 
     ShardDataTreeTransactionChain(final LocalHistoryIdentifier localHistoryIdentifier, final ShardDataTree dataTree) {
-        this.chainId = requireNonNull(localHistoryIdentifier);
+        chainId = requireNonNull(localHistoryIdentifier);
         this.dataTree = requireNonNull(dataTree);
     }
 
@@ -53,19 +54,20 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
         return previousTx.getSnapshot();
     }
 
-    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+    @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
         final DataTreeSnapshot snapshot = getSnapshot();
         LOG.debug("Allocated read-only transaction {} snapshot {}", txId, snapshot);
 
         return new ReadOnlyShardDataTreeTransaction(this, txId, snapshot);
     }
 
-    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+    @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
         final DataTreeSnapshot snapshot = getSnapshot();
         LOG.debug("Allocated read-write transaction {} snapshot {}", txId, snapshot);
 
-        openTransaction = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
-        return openTransaction;
+        final var ret = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
+        openTransaction = ret;
+        return ret;
     }
 
     void close() {
index 0db6f083ac655908ae57de374e348d480d430d60..d7992596e0e37fa783290a5066f909de71f6be1d 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 abstract class ShardDataTreeTransactionParent {
 
index 49cb3c4d38f66d7faf662d7b8d916e7cf6540bb2..ac751fb33d3eb5f8ba22f22fcc86cdf6fb30b139 100644 (file)
@@ -18,7 +18,8 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData;
  *
  * @author syedbahm
  */
-public class ShardReadTransaction extends ShardTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ShardReadTransaction extends ShardTransaction {
     private final AbstractShardDataTreeTransaction<?> transaction;
 
     public ShardReadTransaction(final AbstractShardDataTreeTransaction<?> transaction, final ActorRef shardActor,
index 8cbf14367358da42ad0ec9a0fa5d03d0f41e5d8b..84c346def864f3dc2627eb98e137472771e90939 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
@@ -17,14 +16,15 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData;
  *
  * @author syedbahm
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardReadWriteTransaction extends ShardWriteTransaction {
-    public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
-            ShardStats shardStats) {
+    public ShardReadWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor,
+            final ShardStats shardStats) {
         super(transaction, shardActor, shardStats);
     }
 
     @Override
-    public void handleReceive(Object message) {
+    public void handleReceive(final Object message) {
         if (ReadData.isSerializedType(message)) {
             readData(ReadData.fromSerializable(message));
         } else if (DataExists.isSerializedType(message)) {
index 87d70da1ab3a9c41b0898b7a8551242655b297ee..3baad570b7f4a83a6fc14ce2bfdcdfcd0e8d8669 100644 (file)
@@ -15,8 +15,8 @@ import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnap
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
 import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 
@@ -46,7 +46,7 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
 
         WithSnapshot(final ShardDataTree store, final String shardName, final Logger log, final Snapshot snapshot) {
             super(store, shardName, log);
-            this.restoreFromSnapshot = requireNonNull(snapshot);
+            restoreFromSnapshot = requireNonNull(snapshot);
         }
 
         @Override
index 44393a14f1c1ff1daae4a14d808a36225133d689..3b3462884f85b6987b8a1777e797503e58255976 100644 (file)
@@ -29,6 +29,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 /**
  * The ShardTransaction Actor represents a remote transaction that delegates all actions to DOMDataReadWriteTransaction.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
     private final ActorRef shardActor;
     private final ShardStats shardStats;
@@ -119,7 +120,7 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't "
             + "create remote instances of this actor and thus don't need it to be Serializable.")
     private static class ShardTransactionCreator implements Creator<ShardTransaction> {
-
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final AbstractShardDataTreeTransaction<?> transaction;
@@ -139,23 +140,14 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
 
         @Override
         public ShardTransaction create() {
-            final ShardTransaction tx;
-            switch (type) {
-                case READ_ONLY:
-                    tx = new ShardReadTransaction(transaction, shardActor, shardStats);
-                    break;
-                case READ_WRITE:
-                    tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
-                            shardStats);
-                    break;
-                case WRITE_ONLY:
-                    tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
-                            shardStats);
-                    break;
-                default:
-                    throw new IllegalArgumentException("Unhandled transaction type " + type);
-            }
-
+            final var tx = switch (type) {
+                case READ_ONLY -> new ShardReadTransaction(transaction, shardActor, shardStats);
+                case READ_WRITE -> new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+                    shardActor, shardStats);
+                case WRITE_ONLY -> new ShardWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+                    shardActor, shardStats);
+                default -> throw new IllegalArgumentException("Unhandled transaction type " + type);
+            };
             tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
             return tx;
         }
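
The create() rewrite above is a switch-statement-to-switch-expression conversion. A minimal sketch of the pattern with stand-in types; the TransactionType constants mirror the ones used above, but nothing else here is controller code:

    // Minimal sketch: an arrow switch yields a value directly, so no mutable local
    // or break statements are needed, and an exhaustive enum switch is verified by
    // the compiler without a default arm.
    class SwitchSketch {
        enum TransactionType { READ_ONLY, READ_WRITE, WRITE_ONLY }

        static String transactionKind(final TransactionType type) {
            return switch (type) {
                case READ_ONLY -> "read-only";
                case READ_WRITE, WRITE_ONLY -> "read-write";
            };
        }

        public static void main(final String[] args) {
            for (var type : TransactionType.values()) {
                System.out.println(type + " -> " + transactionKind(type));
            }
        }
    }
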
index 881f3c39d0c701e4c93b391797b6694f01f3254a..122c43592aeaf8b9099f59e98f4eefa3fe9588b4 100644 (file)
@@ -22,7 +22,8 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  *
  * @author Thomas Pantelis
  */
-class ShardTransactionActorFactory {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ShardTransactionActorFactory {
     private static final AtomicLong ACTOR_NAME_COUNTER = new AtomicLong();
 
     private final ShardDataTree dataTree;
@@ -33,9 +34,9 @@ class ShardTransactionActorFactory {
     private final ActorRef shardActor;
     private final String shardName;
 
-    ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext,
-            String txnDispatcherPath, ActorRef shardActor, ActorContext actorContext, ShardStats shardMBean,
-            String shardName) {
+    ShardTransactionActorFactory(final ShardDataTree dataTree, final DatastoreContext datastoreContext,
+            final String txnDispatcherPath, final ActorRef shardActor, final ActorContext actorContext,
+            final ShardStats shardMBean, final String shardName) {
         this.dataTree = requireNonNull(dataTree);
         this.datastoreContext = requireNonNull(datastoreContext);
         this.txnDispatcherPath = requireNonNull(txnDispatcherPath);
@@ -62,20 +63,12 @@ class ShardTransactionActorFactory {
         return sb.append(txId.getTransactionId()).append('_').append(ACTOR_NAME_COUNTER.incrementAndGet()).toString();
     }
 
-    ActorRef newShardTransaction(TransactionType type, TransactionIdentifier transactionID) {
-        final AbstractShardDataTreeTransaction<?> transaction;
-        switch (type) {
-            case READ_ONLY:
-                transaction = dataTree.newReadOnlyTransaction(transactionID);
-                break;
-            case READ_WRITE:
-            case WRITE_ONLY:
-                transaction = dataTree.newReadWriteTransaction(transactionID);
-                break;
-            default:
-                throw new IllegalArgumentException("Unsupported transaction type " + type);
-        }
-
+    ActorRef newShardTransaction(final TransactionType type, final TransactionIdentifier transactionID) {
+        final AbstractShardDataTreeTransaction<?> transaction = switch (type) {
+            case READ_ONLY -> dataTree.newReadOnlyTransaction(transactionID);
+            case READ_WRITE, WRITE_ONLY -> dataTree.newReadWriteTransaction(transactionID);
+            default -> throw new IllegalArgumentException("Unsupported transaction type " + type);
+        };
         return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean)
             .withDispatcher(txnDispatcherPath), actorNameFor(transactionID));
     }
index 9a439bd35bad6d9a86efd500be5e2d987f8f1ab9..bfd7802213e05b1146e32c9031db0fe87ca1e141 100644 (file)
@@ -25,6 +25,7 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 class ShardTransactionMessageRetrySupport implements Closeable {
     private static final Logger LOG = LoggerFactory.getLogger(ShardTransactionMessageRetrySupport.class);
 
@@ -81,7 +82,7 @@ class ShardTransactionMessageRetrySupport implements Closeable {
         messagesToRetry.clear();
     }
 
-    private static class MessageInfo {
+    private static final class MessageInfo {
         final Object message;
         final ActorRef replyTo;
         final String failureMessage;
index 67f2c684a15743e86c0855010bd72c492baf7fd3..764361a016dd56a2074dca2b3201927290f6da29 100644 (file)
@@ -6,7 +6,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
@@ -24,8 +23,8 @@ import org.opendaylight.controller.cluster.datastore.modification.Modification;
  *
  * @author syedbahm
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardWriteTransaction extends ShardTransaction {
-
     private int totalBatchedModificationsReceived;
     private Exception lastBatchedModificationsException;
     private final ReadWriteShardDataTreeTransaction transaction;
index f42af0b88e5847c3201520cfbca632b4b2da4dcd..2c7d13189f61a832fe386ed383cb1b4b193120ad 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
 import java.util.concurrent.CompletionStage;
+import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +32,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private final DataTreeModification transaction;
     private final ShardDataTree dataTree;
-    private final TransactionIdentifier transactionId;
+    private final @NonNull TransactionIdentifier transactionId;
     private final CompositeDataTreeCohort userCohorts;
     private final @Nullable SortedSet<String> participatingShardNames;
 
@@ -54,13 +56,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
         this.dataTree = requireNonNull(dataTree);
         this.transaction = requireNonNull(transaction);
         this.transactionId = requireNonNull(transactionId);
-        this.userCohorts = null;
-        this.participatingShardNames = null;
+        userCohorts = null;
+        participatingShardNames = null;
         this.nextFailure = requireNonNull(nextFailure);
     }
 
     @Override
-    public TransactionIdentifier getIdentifier() {
+    TransactionIdentifier transactionId() {
         return transactionId;
     }
 
@@ -81,17 +83,17 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private void checkState(final State expected) {
         Preconditions.checkState(state == expected, "State %s does not match expected state %s for %s",
-                state, expected, getIdentifier());
+                state, expected, transactionId());
     }
 
     @Override
-    public void canCommit(final FutureCallback<Void> newCallback) {
+    public void canCommit(final FutureCallback<Empty> newCallback) {
         if (state == State.CAN_COMMIT_PENDING) {
             return;
         }
 
         checkState(State.READY);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.CAN_COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -104,7 +106,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     @Override
     public void preCommit(final FutureCallback<DataTreeCandidate> newCallback) {
         checkState(State.CAN_COMMIT_COMPLETE);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.PRE_COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -115,9 +117,9 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public void abort(final FutureCallback<Void> abortCallback) {
+    public void abort(final FutureCallback<Empty> abortCallback) {
         if (!dataTree.startAbort(this)) {
-            abortCallback.onSuccess(null);
+            abortCallback.onSuccess(Empty.value());
             return;
         }
 
@@ -126,15 +128,15 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
         final Optional<CompletionStage<?>> maybeAborts = userCohorts.abort();
         if (!maybeAborts.isPresent()) {
-            abortCallback.onSuccess(null);
+            abortCallback.onSuccess(Empty.value());
             return;
         }
 
-        maybeAborts.get().whenComplete((noop, failure) -> {
+        maybeAborts.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 abortCallback.onFailure(failure);
             } else {
-                abortCallback.onSuccess(null);
+                abortCallback.onSuccess(Empty.value());
             }
         });
     }
@@ -142,7 +144,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     @Override
     public void commit(final FutureCallback<UnsignedLong> newCallback) {
         checkState(State.PRE_COMMIT_COMPLETE);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -154,20 +156,20 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private <T> FutureCallback<T> switchState(final State newState) {
         @SuppressWarnings("unchecked")
-        final FutureCallback<T> ret = (FutureCallback<T>) this.callback;
-        this.callback = null;
+        final FutureCallback<T> ret = (FutureCallback<T>) callback;
+        callback = null;
         LOG.debug("Transaction {} changing state from {} to {}", transactionId, state, newState);
-        this.state = newState;
+        state = newState;
         return ret;
     }
 
     void setNewCandidate(final DataTreeCandidateTip dataTreeCandidate) {
         checkState(State.PRE_COMMIT_COMPLETE);
-        this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+        candidate = verifyNotNull(dataTreeCandidate);
     }
 
     void successfulCanCommit() {
-        switchState(State.CAN_COMMIT_COMPLETE).onSuccess(null);
+        switchState(State.CAN_COMMIT_COMPLETE).onSuccess(Empty.value());
     }
 
     void failedCanCommit(final Exception cause) {
@@ -181,16 +183,16 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
      * @param dataTreeCandidate {@link DataTreeCandidate} under consideration
      * @param futureCallback the callback to invoke on completion, which may be immediate or async.
      */
-    void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Void> futureCallback) {
+    void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Empty> futureCallback) {
         userCohorts.reset();
 
-        final Optional<CompletionStage<Void>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
+        final Optional<CompletionStage<Empty>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
         if (!maybeCanCommitFuture.isPresent()) {
             doUserPreCommit(futureCallback);
             return;
         }
 
-        maybeCanCommitFuture.get().whenComplete((noop, failure) -> {
+        maybeCanCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 futureCallback.onFailure(failure);
             } else {
@@ -199,25 +201,25 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
         });
     }
 
-    private void doUserPreCommit(final FutureCallback<Void> futureCallback) {
-        final Optional<CompletionStage<Void>> maybePreCommitFuture = userCohorts.preCommit();
+    private void doUserPreCommit(final FutureCallback<Empty> futureCallback) {
+        final Optional<CompletionStage<Empty>> maybePreCommitFuture = userCohorts.preCommit();
         if (!maybePreCommitFuture.isPresent()) {
-            futureCallback.onSuccess(null);
+            futureCallback.onSuccess(Empty.value());
             return;
         }
 
-        maybePreCommitFuture.get().whenComplete((noop, failure) -> {
+        maybePreCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 futureCallback.onFailure(failure);
             } else {
-                futureCallback.onSuccess(null);
+                futureCallback.onSuccess(Empty.value());
             }
         });
     }
 
     void successfulPreCommit(final DataTreeCandidateTip dataTreeCandidate) {
         LOG.trace("Transaction {} prepared candidate {}", transaction, dataTreeCandidate);
-        this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+        candidate = verifyNotNull(dataTreeCandidate);
         switchState(State.PRE_COMMIT_COMPLETE).onSuccess(dataTreeCandidate);
     }
 
@@ -233,13 +235,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     }
 
     void successfulCommit(final UnsignedLong journalIndex, final Runnable onComplete) {
-        final Optional<CompletionStage<Void>> maybeCommitFuture = userCohorts.commit();
+        final Optional<CompletionStage<Empty>> maybeCommitFuture = userCohorts.commit();
         if (!maybeCommitFuture.isPresent()) {
             finishSuccessfulCommit(journalIndex, onComplete);
             return;
         }
 
-        maybeCommitFuture.get().whenComplete((noop, failure) -> {
+        maybeCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 LOG.error("User cohorts failed to commit", failure);
             }
@@ -271,7 +273,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     void reportFailure(final Exception cause) {
         if (nextFailure == null) {
-            this.nextFailure = requireNonNull(cause);
+            nextFailure = requireNonNull(cause);
         } else {
             LOG.debug("Transaction {} already has a set failure, not updating it", transactionId, cause);
         }
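
Several methods above share the same shape: an Optional&lt;CompletionStage&lt;?&gt;&gt; from the user cohorts either completes the callback immediately (absent) or defers it to whenComplete(). A minimal JDK-only sketch of that control flow; UserCohortBridge and finish are illustrative names, not controller code:

    import java.util.Optional;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionStage;
    import java.util.function.Consumer;

    // Minimal sketch: absent stage -> complete the callback right away;
    // present stage -> report success (null) or failure from whenComplete().
    class UserCohortBridge {
        static void finish(final Optional<CompletionStage<?>> maybeStage, final Consumer<Throwable> callback) {
            if (!maybeStage.isPresent()) {
                callback.accept(null);
                return;
            }
            maybeStage.orElseThrow().whenComplete((noop, failure) -> callback.accept(failure));
        }

        public static void main(final String[] args) {
            finish(Optional.empty(), failure -> System.out.println("immediate, failure=" + failure));
            finish(Optional.<CompletionStage<?>>of(CompletableFuture.completedFuture("done")),
                failure -> System.out.println("deferred, failure=" + failure));
        }
    }
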
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java
deleted file mode 100644 (file)
index 5e8a954..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Arrays;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit
- * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the
- * shard as an optimization.
- *
- * @author Thomas Pantelis
- */
-class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
-    private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
-
-    private final ActorUtils actorUtils;
-    private final Future<Object> cohortFuture;
-    private final TransactionIdentifier transactionId;
-    private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
-    private final OperationCallback.Reference operationCallbackRef;
-
-    SingleCommitCohortProxy(ActorUtils actorUtils, Future<Object> cohortFuture, TransactionIdentifier transactionId,
-            OperationCallback.Reference operationCallbackRef) {
-        this.actorUtils = actorUtils;
-        this.cohortFuture = cohortFuture;
-        this.transactionId = requireNonNull(transactionId);
-        this.operationCallbackRef = operationCallbackRef;
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        LOG.debug("Tx {} canCommit", transactionId);
-
-        final SettableFuture<Boolean> returnFuture = SettableFuture.create();
-
-        cohortFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(Throwable failure, Object cohortResponse) {
-                if (failure != null) {
-                    operationCallbackRef.get().failure();
-                    returnFuture.setException(failure);
-                    return;
-                }
-
-                operationCallbackRef.get().success();
-
-                LOG.debug("Tx {} successfully completed direct commit", transactionId);
-
-                // The Future was the result of a direct commit to the shard, essentially eliding the
-                // front-end 3PC coordination. We don't really care about the specific Future
-                // response object, only that it completed successfully. At this point the Tx is complete
-                // so return true. The subsequent preCommit and commit phases will be no-ops, ie return
-                // immediate success, to complete the 3PC for the front-end.
-                returnFuture.set(Boolean.TRUE);
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return returnFuture;
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegateCohort.preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegateCohort.abort();
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return delegateCohort.commit();
-    }
-
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return Arrays.asList(cohortFuture);
-    }
-}
index 0278c1d1e590fccf69498bdeacfe02bddeea7120..57c680da2a6f0d673b7ee1861eaecd05e6c07eba 100644 (file)
@@ -20,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Standalone transaction specialization of {@link AbstractFrontendHistory}. There can be multiple open transactions
@@ -60,12 +60,12 @@ final class StandaloneFrontendHistory extends AbstractFrontendHistory {
 
     @Override
     FrontendTransaction createOpenSnapshot(final TransactionIdentifier id) {
-        return FrontendReadOnlyTransaction.create(this, tree.newReadOnlyTransaction(id));
+        return FrontendReadOnlyTransaction.create(this, tree.newStandaloneReadOnlyTransaction(id));
     }
 
     @Override
     FrontendTransaction createOpenTransaction(final TransactionIdentifier id) {
-        return FrontendReadWriteTransaction.createOpen(this, tree.newReadWriteTransaction(id));
+        return FrontendReadWriteTransaction.createOpen(this, tree.newStandaloneReadWriteTransaction(id));
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java
deleted file mode 100644 (file)
index 4ef89b4..0000000
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A chain of {@link TransactionProxy}s. It allows only a single transaction to be open
- * at a time. For remote transactions, it also tracks the outstanding readiness requests
- * towards the shard and unblocks operations only after all have completed.
- */
-final class TransactionChainProxy extends AbstractTransactionContextFactory<LocalTransactionChain>
-        implements DOMStoreTransactionChain {
-    private abstract static class State {
-        /**
-         * Check if it is okay to allocate a new transaction.
-         * @throws IllegalStateException if a transaction may not be allocated.
-         */
-        abstract void checkReady();
-
-        /**
-         * Return the future which needs to be waited for before shard information
-         * is returned (which unblocks remote transactions).
-         * @return Future to wait for, or null if no wait is necessary
-         */
-        abstract Future<?> previousFuture();
-    }
-
-    private abstract static class Pending extends State {
-        private final TransactionIdentifier transaction;
-        private final Future<?> previousFuture;
-
-        Pending(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            this.previousFuture = previousFuture;
-            this.transaction = requireNonNull(transaction);
-        }
-
-        @Override
-        final Future<?> previousFuture() {
-            return previousFuture;
-        }
-
-        final TransactionIdentifier getIdentifier() {
-            return transaction;
-        }
-    }
-
-    private static final class Allocated extends Pending {
-        Allocated(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            super(transaction, previousFuture);
-        }
-
-        @Override
-        void checkReady() {
-            throw new IllegalStateException(String.format("Previous transaction %s is not ready yet", getIdentifier()));
-        }
-    }
-
-    private static final class Submitted extends Pending {
-        Submitted(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            super(transaction, previousFuture);
-        }
-
-        @Override
-        void checkReady() {
-            // Okay to allocate
-        }
-    }
-
-    private abstract static class DefaultState extends State {
-        @Override
-        final Future<?> previousFuture() {
-            return null;
-        }
-    }
-
-    private static final State IDLE_STATE = new DefaultState() {
-        @Override
-        void checkReady() {
-            // Okay to allocate
-        }
-    };
-
-    private static final State CLOSED_STATE = new DefaultState() {
-        @Override
-        void checkReady() {
-            throw new DOMTransactionChainClosedException("Transaction chain has been closed");
-        }
-    };
-
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
-    private static final AtomicReferenceFieldUpdater<TransactionChainProxy, State> STATE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(TransactionChainProxy.class, State.class, "currentState");
-
-    private final TransactionContextFactory parent;
-    private volatile State currentState = IDLE_STATE;
-
-    /**
-     * This map holds a Promise instance for each read-only tx. It is used to maintain the ordering of tx creates
-     * with respect to read-only txs between this class and a LocalTransactionChain, since the two are bridged by
-     * asynchronous futures. Consider, for example, the following scenario:
-     * <p/>
-     *   1) Create write tx1 on chain
-     *   2) do write and submit
-     *   3) Create read-only tx2 on chain and issue read
-     *   4) Create write tx3 on chain, do write but do not submit
-     * <p/>
-     * Depending on the sequence/timing, tx3 may create its local tx on the LocalTransactionChain before tx2 does,
-     * which results in tx2 failing because tx3 isn't ready yet. Maintaining ordering prevents this issue
-     * (see Bug 4774).
-     * <p/>
-     * A Promise is added via newReadOnlyTransaction. When the parent class completes the primary shard
-     * lookup and creates the TransactionContext (either success or failure), onTransactionContextCreated is
-     * called, which completes the Promise. A write tx created prior to completion will wait on the
-     * Promise's Future via findPrimaryShard.
-     */
-    private final ConcurrentMap<TransactionIdentifier, Promise<Object>> priorReadOnlyTxPromises =
-            new ConcurrentHashMap<>();
-
-    TransactionChainProxy(final TransactionContextFactory parent, final LocalHistoryIdentifier historyId) {
-        super(parent.getActorUtils(), historyId);
-        this.parent = parent;
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        currentState.checkReady();
-        TransactionProxy transactionProxy = new TransactionProxy(this, TransactionType.READ_ONLY);
-        priorReadOnlyTxPromises.put(transactionProxy.getIdentifier(), Futures.<Object>promise());
-        return transactionProxy;
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return allocateWriteTransaction(TransactionType.READ_WRITE);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return allocateWriteTransaction(TransactionType.WRITE_ONLY);
-    }
-
-    @Override
-    public void close() {
-        currentState = CLOSED_STATE;
-
-        // Send a close transaction chain request to each and every shard
-
-        getActorUtils().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(),
-                CloseTransactionChain.class);
-    }
-
-    private TransactionProxy allocateWriteTransaction(final TransactionType type) {
-        State localState = currentState;
-        localState.checkReady();
-
-        final TransactionProxy ret = new TransactionProxy(this, type);
-        currentState = new Allocated(ret.getIdentifier(), localState.previousFuture());
-        return ret;
-    }
-
-    @Override
-    protected LocalTransactionChain factoryForShard(final String shardName, final ActorSelection shardLeader,
-            final ReadOnlyDataTree dataTree) {
-        final LocalTransactionChain ret = new LocalTransactionChain(this, shardLeader, dataTree);
-        LOG.debug("Allocated transaction chain {} for shard {} leader {}", ret, shardName, shardLeader);
-        return ret;
-    }
-
-    /**
-     * This method is overridden to ensure the previous Tx's ready operations complete
-     * before we initiate the next Tx in the chain, avoiding creation failures while those
-     * ready operations are still in flight.
-     */
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    @Override
-    protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, final TransactionIdentifier txId) {
-        // Read current state atomically
-        final State localState = currentState;
-
-        // There are no outstanding futures, shortcut
-        Future<?> previous = localState.previousFuture();
-        if (previous == null) {
-            return combineFutureWithPossiblePriorReadOnlyTxFutures(parent.findPrimaryShard(shardName, txId), txId);
-        }
-
-        final String previousTransactionId;
-
-        if (localState instanceof Pending) {
-            previousTransactionId = ((Pending) localState).getIdentifier().toString();
-            LOG.debug("Tx: {} - waiting for ready futures with pending Tx {}", txId, previousTransactionId);
-        } else {
-            previousTransactionId = "";
-            LOG.debug("Waiting for ready futures on chain {}", getHistoryId());
-        }
-
-        previous = combineFutureWithPossiblePriorReadOnlyTxFutures(previous, txId);
-
-        // Add a callback for completion of the combined Futures.
-        final Promise<PrimaryShardInfo> returnPromise = Futures.promise();
-
-        final OnComplete onComplete = new OnComplete() {
-            @Override
-            public void onComplete(final Throwable failure, final Object notUsed) {
-                if (failure != null) {
-                    // A Ready Future failed so fail the returned Promise.
-                    LOG.error("Tx: {} - ready future failed for previous Tx {}", txId, previousTransactionId);
-                    returnPromise.failure(failure);
-                } else {
-                    LOG.debug("Tx: {} - previous Tx {} readied - proceeding to FindPrimaryShard",
-                            txId, previousTransactionId);
-
-                    // Send the FindPrimaryShard message and use the resulting Future to complete the
-                    // returned Promise.
-                    returnPromise.completeWith(parent.findPrimaryShard(shardName, txId));
-                }
-            }
-        };
-
-        previous.onComplete(onComplete, getActorUtils().getClientDispatcher());
-        return returnPromise.future();
-    }
-
-    private <T> Future<T> combineFutureWithPossiblePriorReadOnlyTxFutures(final Future<T> future,
-            final TransactionIdentifier txId) {
-        return priorReadOnlyTxPromises.isEmpty() || priorReadOnlyTxPromises.containsKey(txId) ? future
-                // Tough luck, we need to do some work
-                : combineWithPriorReadOnlyTxFutures(future, txId);
-    }
-
-    // Split out of the common path
-    private <T> Future<T> combineWithPriorReadOnlyTxFutures(final Future<T> future, final TransactionIdentifier txId) {
-        // Take a stable snapshot, and check if we raced
-        final List<Entry<TransactionIdentifier, Promise<Object>>> priorReadOnlyTxPromiseEntries =
-                new ArrayList<>(priorReadOnlyTxPromises.entrySet());
-        if (priorReadOnlyTxPromiseEntries.isEmpty()) {
-            return future;
-        }
-
-        final List<Future<Object>> priorReadOnlyTxFutures = new ArrayList<>(priorReadOnlyTxPromiseEntries.size());
-        for (Entry<TransactionIdentifier, Promise<Object>> entry: priorReadOnlyTxPromiseEntries) {
-            LOG.debug("Tx: {} - waiting on future for prior read-only Tx {}", txId, entry.getKey());
-            priorReadOnlyTxFutures.add(entry.getValue().future());
-        }
-
-        final Future<Iterable<Object>> combinedFutures = Futures.sequence(priorReadOnlyTxFutures,
-            getActorUtils().getClientDispatcher());
-
-        final Promise<T> returnPromise = Futures.promise();
-        final OnComplete<Iterable<Object>> onComplete = new OnComplete<>() {
-            @Override
-            public void onComplete(final Throwable failure, final Iterable<Object> notUsed) {
-                LOG.debug("Tx: {} - prior read-only Tx futures complete", txId);
-
-                // Complete the returned Promise with the original Future.
-                returnPromise.completeWith(future);
-            }
-        };
-
-        combinedFutures.onComplete(onComplete, getActorUtils().getClientDispatcher());
-        return returnPromise.future();
-    }
-
-    @Override
-    protected <T> void onTransactionReady(final TransactionIdentifier transaction,
-            final Collection<Future<T>> cohortFutures) {
-        final State localState = currentState;
-        checkState(localState instanceof Allocated, "Readying transaction %s while state is %s", transaction,
-            localState);
-        final TransactionIdentifier currentTx = ((Allocated)localState).getIdentifier();
-        checkState(transaction.equals(currentTx), "Readying transaction %s while %s is allocated", transaction,
-            currentTx);
-
-        // Transaction ready and we are not waiting for futures -- go to idle
-        if (cohortFutures.isEmpty()) {
-            currentState = IDLE_STATE;
-            return;
-        }
-
-        // Combine the ready Futures into 1
-        final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorUtils().getClientDispatcher());
-
-        // Record that we have outstanding futures
-        final State newState = new Submitted(transaction, combined);
-        currentState = newState;
-
-        // Attach a completion reset, but only if we do not allocate a transaction
-        // in-between
-        combined.onComplete(new OnComplete<Iterable<T>>() {
-            @Override
-            public void onComplete(final Throwable arg0, final Iterable<T> arg1) {
-                STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    @Override
-    protected void onTransactionContextCreated(final TransactionIdentifier transactionId) {
-        Promise<Object> promise = priorReadOnlyTxPromises.remove(transactionId);
-        if (promise != null) {
-            promise.success(null);
-        }
-    }
-}
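The combineWithPriorReadOnlyTxFutures()/onTransactionReady() logic above boils down to gating one Future on the completion of a collection of others. Below is a minimal sketch of that gating pattern; the OrderedLookup helper and its names are assumptions for illustration, not part of the removed file.

import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
import java.util.List;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.Promise;

final class OrderedLookup {
    private OrderedLookup() {
        // utility class
    }

    // Blocks a follow-up Future on a set of prior futures via Futures.sequence(),
    // proceeding with the lookup once they have all completed.
    static <T> Future<T> afterAll(final Future<T> lookup, final List<Future<Object>> priorFutures,
            final ExecutionContext dispatcher) {
        final Promise<T> ret = Futures.promise();
        Futures.sequence(priorFutures, dispatcher).onComplete(new OnComplete<Iterable<Object>>() {
            @Override
            public void onComplete(final Throwable failure, final Iterable<Object> notUsed) {
                // Mirror the behaviour in the removed class: proceed with the lookup either way.
                ret.completeWith(lookup);
            }
        }, dispatcher);
        return ret.future();
    }
}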
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java
deleted file mode 100644 (file)
index 549136b..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.yangtools.concepts.AbstractSimpleIdentifiable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-abstract class TransactionContext extends AbstractSimpleIdentifiable<TransactionIdentifier> {
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionContext.class);
-
-    private final short transactionVersion;
-
-    private long modificationCount = 0;
-    private boolean handOffComplete;
-
-    TransactionContext(final TransactionIdentifier transactionIdentifier) {
-        this(transactionIdentifier, DataStoreVersions.CURRENT_VERSION);
-    }
-
-    TransactionContext(final TransactionIdentifier transactionIdentifier, final short transactionVersion) {
-        super(transactionIdentifier);
-        this.transactionVersion = transactionVersion;
-    }
-
-    final short getTransactionVersion() {
-        return transactionVersion;
-    }
-
-    final void incrementModificationCount() {
-        modificationCount++;
-    }
-
-    final void logModificationCount() {
-        LOG.debug("Total modifications on Tx {} = [ {} ]", getIdentifier(), modificationCount);
-    }
-
-    /**
-     * Invoked by {@link AbstractTransactionContextWrapper} when it has finished handing
-     * off operations to this context. From this point on, the context is responsible
-     * for throttling operations.
-     *
-     * <p>
-     * Implementations can rely on the wrapper calling this operation in a synchronized
-     * block, so they do not need to ensure visibility of this state transition themselves.
-     */
-    final void operationHandOffComplete() {
-        handOffComplete = true;
-    }
-
-    final boolean isOperationHandOffComplete() {
-        return handOffComplete;
-    }
-
-    /**
-     * Indicates whether this TransactionContext uses operation limiting.
-     *
-     * @return true if operation limiting is used, false otherwise
-     */
-    boolean usesOperationLimiting() {
-        return false;
-    }
-
-    abstract void executeDelete(YangInstanceIdentifier path, Boolean havePermit);
-
-    abstract void executeMerge(YangInstanceIdentifier path, NormalizedNode data, Boolean havePermit);
-
-    abstract void executeWrite(YangInstanceIdentifier path, NormalizedNode data, Boolean havePermit);
-
-    abstract <T> void executeRead(AbstractRead<T> readCmd, SettableFuture<T> proxyFuture, Boolean havePermit);
-
-    abstract Future<ActorSelection> readyTransaction(Boolean havePermit,
-            Optional<SortedSet<String>> participatingShardNames);
-
-    abstract Future<Object> directCommit(Boolean havePermit);
-
-    abstract void closeTransaction();
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java
deleted file mode 100644 (file)
index ef8cc49..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A PhantomReference that closes remote transactions for a TransactionContext once its TransactionProxy
- * referent is garbage collected. This is used for read-only transactions, as they're not explicitly closed
- * by clients, so the only way to detect that a transaction is no longer in use and is safe
- * to clean up is when its proxy is garbage collected. Exactly when an instance gets GC'ed is inexact,
- * but TransactionProxy instances should generally be short-lived enough to avoid being promoted
- * to the old generation space and thus should be cleaned up in a timely manner, as the GC
- * runs on the young generation (eden, survivor) spaces much more frequently.
- */
-final class TransactionContextCleanup extends FinalizablePhantomReference<TransactionProxy> {
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionContextCleanup.class);
-    /**
-     * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
-     * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
-     * trickery to clean up its internal thread when the bundle is unloaded.
-     */
-    private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
-
-    /**
-     * This stores the TransactionContextCleanup instances statically. This is
-     * necessary because PhantomReferences need a hard reference so they're not garbage collected.
-     * Once finalized, a TransactionContextCleanup removes itself from this map
-     * and thus becomes eligible for garbage collection.
-     */
-    private static final Map<TransactionContext, TransactionContextCleanup> CACHE = new ConcurrentHashMap<>();
-
-    private final TransactionContext cleanup;
-
-    private TransactionContextCleanup(final TransactionProxy referent, final TransactionContext cleanup) {
-        super(referent, QUEUE);
-        this.cleanup = cleanup;
-    }
-
-    static void track(final TransactionProxy referent, final TransactionContext cleanup) {
-        final TransactionContextCleanup ret = new TransactionContextCleanup(referent, cleanup);
-        CACHE.put(cleanup, ret);
-    }
-
-    @Override
-    public void finalizeReferent() {
-        LOG.trace("Cleaning up {} Tx actors", cleanup);
-
-        if (CACHE.remove(cleanup) != null) {
-            cleanup.closeTransaction();
-        }
-    }
-
-    static void untrack(final TransactionContext cleanup) {
-        CACHE.remove(cleanup);
-    }
-}
\ No newline at end of file
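A hypothetical sketch of how the track()/untrack() hooks above would be wired by a caller; the openTracked() and explicitClose() helpers are assumptions used purely to illustrate the lifecycle, not code from this change.

final class CleanupUsage {
    private CleanupUsage() {
        // utility class
    }

    static TransactionContext openTracked(final TransactionProxy proxy, final TransactionContext context) {
        // Register the phantom reference so the remote actors are closed once the proxy
        // becomes unreachable without an explicit close().
        TransactionContextCleanup.track(proxy, context);
        return context;
    }

    static void explicitClose(final TransactionContext context) {
        // An explicit close makes the phantom cleanup redundant, so drop the tracking entry.
        TransactionContextCleanup.untrack(context);
        context.closeTransaction();
    }
}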
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java
deleted file mode 100644 (file)
index 3944b04..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
-import scala.concurrent.Future;
-
-/**
- * An {@link AbstractTransactionContextFactory} which produces TransactionContext instances for single
- * transactions (ie not chained).
- */
-final class TransactionContextFactory extends AbstractTransactionContextFactory<LocalTransactionFactoryImpl> {
-    private final AtomicLong nextHistory = new AtomicLong(1);
-
-    TransactionContextFactory(final ActorUtils actorUtils, final ClientIdentifier clientId) {
-        super(actorUtils, new LocalHistoryIdentifier(clientId, 0));
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected LocalTransactionFactoryImpl factoryForShard(final String shardName, final ActorSelection shardLeader,
-            final ReadOnlyDataTree dataTree) {
-        return new LocalTransactionFactoryImpl(getActorUtils(), shardLeader, dataTree);
-    }
-
-    @Override
-    protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, TransactionIdentifier txId) {
-        return getActorUtils().findPrimaryShardAsync(shardName);
-    }
-
-    @Override
-    protected <T> void onTransactionReady(final TransactionIdentifier transaction,
-            final Collection<Future<T>> cohortFutures) {
-        // Transactions are disconnected, this is a no-op
-    }
-
-    DOMStoreTransactionChain createTransactionChain() {
-        return new TransactionChainProxy(this, new LocalHistoryIdentifier(getHistoryId().getClientId(),
-                nextHistory.getAndIncrement()));
-    }
-
-    @Override
-    protected void onTransactionContextCreated(final TransactionIdentifier transactionId) {
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionModificationOperation.java
deleted file mode 100644 (file)
index eeaec6b..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A TransactionOperation to apply a specific modification. Subclasses provide type capture of required data, so that
- * we instantiate AbstractModification subclasses for the bare minimum time required.
- */
-abstract class TransactionModificationOperation extends TransactionOperation {
-    private abstract static class AbstractDataOperation extends TransactionModificationOperation {
-        private final NormalizedNode data;
-
-        AbstractDataOperation(final YangInstanceIdentifier path, final NormalizedNode data) {
-            super(path);
-            this.data = requireNonNull(data);
-        }
-
-        final NormalizedNode data() {
-            return data;
-        }
-    }
-
-    static final class DeleteOperation extends TransactionModificationOperation {
-        DeleteOperation(final YangInstanceIdentifier path) {
-            super(path);
-        }
-
-        @Override
-        protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-            transactionContext.executeDelete(path(), havePermit);
-        }
-    }
-
-    static final class MergeOperation extends AbstractDataOperation {
-        MergeOperation(final YangInstanceIdentifier path, final NormalizedNode data) {
-            super(path, data);
-        }
-
-        @Override
-        protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-            transactionContext.executeMerge(path(), data(), havePermit);
-        }
-    }
-
-    static final class WriteOperation extends AbstractDataOperation {
-        WriteOperation(final YangInstanceIdentifier path, final NormalizedNode data) {
-            super(path, data);
-        }
-
-        @Override
-        protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-            transactionContext.executeWrite(path(), data(), havePermit);
-        }
-    }
-
-    private final YangInstanceIdentifier path;
-
-    TransactionModificationOperation(final YangInstanceIdentifier path) {
-        this.path = requireNonNull(path);
-    }
-
-    final YangInstanceIdentifier path() {
-        return path;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java
deleted file mode 100644 (file)
index 962d261..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Abstract superclass for transaction operations which should be executed
- * on a {@link TransactionContext} at a later point in time.
- */
-abstract class TransactionOperation {
-    /**
-     * Execute the delayed operation.
-     *
-     * @param transactionContext the TransactionContext
-     * @param havePermit Boolean indicating whether this operation has tried and acquired a permit; null if there
-     *                   was no attempt to acquire a permit.
-     */
-    protected abstract void invoke(TransactionContext transactionContext, @Nullable Boolean havePermit);
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
deleted file mode 100644 (file)
index 16a979f..0000000
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static com.google.common.base.Verify.verifyNotNull;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.DeleteOperation;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.MergeOperation;
-import org.opendaylight.controller.cluster.datastore.TransactionModificationOperation.WriteOperation;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
-import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A transaction potentially spanning multiple backend shards.
- */
-public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier>
-        implements DOMStoreReadWriteTransaction {
-    private enum TransactionState {
-        OPEN,
-        READY,
-        CLOSED,
-    }
-
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
-    private static final DeleteOperation ROOT_DELETE_OPERATION = new DeleteOperation(YangInstanceIdentifier.empty());
-
-    private final Map<String, AbstractTransactionContextWrapper> txContextWrappers = new TreeMap<>();
-    private final AbstractTransactionContextFactory<?> txContextFactory;
-    private final TransactionType type;
-    private TransactionState state = TransactionState.OPEN;
-
-    @VisibleForTesting
-    public TransactionProxy(final AbstractTransactionContextFactory<?> txContextFactory, final TransactionType type) {
-        super(txContextFactory.nextIdentifier(), txContextFactory.getActorUtils().getDatastoreContext()
-                .isTransactionDebugContextEnabled());
-        this.txContextFactory = txContextFactory;
-        this.type = requireNonNull(type);
-
-        LOG.debug("New {} Tx - {}", type, getIdentifier());
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return executeRead(shardNameFromIdentifier(path), new DataExists(path, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    private <T> FluentFuture<T> executeRead(final String shardName, final AbstractRead<T> readCmd) {
-        checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed");
-
-        LOG.trace("Tx {} {} {}", getIdentifier(), readCmd.getClass().getSimpleName(), readCmd.getPath());
-
-        final SettableFuture<T> proxyFuture = SettableFuture.create();
-        AbstractTransactionContextWrapper contextWrapper = getContextWrapper(shardName);
-        contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-            @Override
-            public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-                transactionContext.executeRead(readCmd, proxyFuture, havePermit);
-            }
-        });
-
-        return FluentFuture.from(proxyFuture);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
-        checkState(type != TransactionType.WRITE_ONLY, "Reads from write-only transactions are not allowed");
-        requireNonNull(path, "path should not be null");
-
-        LOG.trace("Tx {} read {}", getIdentifier(), path);
-        return path.isEmpty() ? readAllData() : singleShardRead(shardNameFromIdentifier(path), path);
-    }
-
-    private FluentFuture<Optional<NormalizedNode>> singleShardRead(final String shardName,
-            final YangInstanceIdentifier path) {
-        return executeRead(shardName, new ReadData(path, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    private FluentFuture<Optional<NormalizedNode>> readAllData() {
-        final Set<String> allShardNames = txContextFactory.getActorUtils().getConfiguration().getAllShardNames();
-        final Collection<FluentFuture<Optional<NormalizedNode>>> futures = new ArrayList<>(allShardNames.size());
-
-        for (String shardName : allShardNames) {
-            futures.add(singleShardRead(shardName, YangInstanceIdentifier.empty()));
-        }
-
-        final ListenableFuture<List<Optional<NormalizedNode>>> listFuture = Futures.allAsList(futures);
-        final ListenableFuture<Optional<NormalizedNode>> aggregateFuture;
-
-        aggregateFuture = Futures.transform(listFuture, input -> {
-            try {
-                return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.empty(), input,
-                        txContextFactory.getActorUtils().getSchemaContext(),
-                        txContextFactory.getActorUtils().getDatastoreContext().getLogicalStoreType());
-            } catch (DataValidationFailedException e) {
-                throw new IllegalArgumentException("Failed to aggregate", e);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return FluentFuture.from(aggregateFuture);
-    }
-
-    @Override
-    public void delete(final YangInstanceIdentifier path) {
-        checkModificationState("delete", path);
-
-        if (path.isEmpty()) {
-            deleteAllData();
-        } else {
-            executeModification(new DeleteOperation(path));
-        }
-    }
-
-    private void deleteAllData() {
-        for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) {
-            getContextWrapper(shardName).maybeExecuteTransactionOperation(ROOT_DELETE_OPERATION);
-        }
-    }
-
-    @Override
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
-        checkModificationState("merge", path);
-
-        if (path.isEmpty()) {
-            mergeAllData(checkRootData(data));
-        } else {
-            executeModification(new MergeOperation(path, data));
-        }
-    }
-
-    private void mergeAllData(final ContainerNode rootData) {
-        // Populate requests for individual shards that are being touched
-        final Map<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> rootBuilders = new HashMap<>();
-        for (DataContainerChild child : rootData.body()) {
-            final String shardName = shardNameFromRootChild(child);
-            rootBuilders.computeIfAbsent(shardName,
-                unused -> Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier()))
-                .addChild(child);
-        }
-
-        // Now dispatch all merges
-        for (Entry<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> entry : rootBuilders.entrySet()) {
-            getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new MergeOperation(
-                YangInstanceIdentifier.empty(), entry.getValue().build()));
-        }
-    }
-
-    @Override
-    public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
-        checkModificationState("write", path);
-
-        if (path.isEmpty()) {
-            writeAllData(checkRootData(data));
-        } else {
-            executeModification(new WriteOperation(path, data));
-        }
-    }
-
-    private void writeAllData(final ContainerNode rootData) {
-        // Open builders for all shards
-        final Map<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> rootBuilders = new HashMap<>();
-        for (String shardName : getActorUtils().getConfiguration().getAllShardNames()) {
-            rootBuilders.put(shardName, Builders.containerBuilder().withNodeIdentifier(rootData.getIdentifier()));
-        }
-
-        // Now distribute children as needed
-        for (DataContainerChild child : rootData.body()) {
-            final String shardName = shardNameFromRootChild(child);
-            verifyNotNull(rootBuilders.get(shardName), "Failed to find builder for %s", shardName).addChild(child);
-        }
-
-        // Now dispatch all writes
-        for (Entry<String, DataContainerNodeBuilder<NodeIdentifier, ContainerNode>> entry : rootBuilders.entrySet()) {
-            getContextWrapper(entry.getKey()).maybeExecuteTransactionOperation(new WriteOperation(
-                YangInstanceIdentifier.empty(), entry.getValue().build()));
-        }
-    }
-
-    private void executeModification(final TransactionModificationOperation operation) {
-        getContextWrapper(operation.path()).maybeExecuteTransactionOperation(operation);
-    }
-
-    private static ContainerNode checkRootData(final NormalizedNode data) {
-        // Root has to be a container
-        checkArgument(data instanceof ContainerNode, "Invalid root data %s", data);
-        return (ContainerNode) data;
-    }
-
-    private void checkModificationState(final String opName, final YangInstanceIdentifier path) {
-        checkState(type != TransactionType.READ_ONLY, "Modification operation on read-only transaction is not allowed");
-        checkState(state == TransactionState.OPEN, "Transaction is sealed - further modifications are not allowed");
-        LOG.trace("Tx {} {} {}", getIdentifier(), opName, path);
-    }
-
-    private boolean seal(final TransactionState newState) {
-        if (state == TransactionState.OPEN) {
-            state = newState;
-            return true;
-        }
-        return false;
-    }
-
-    @Override
-    public final void close() {
-        if (!seal(TransactionState.CLOSED)) {
-            checkState(state == TransactionState.CLOSED, "Transaction %s is ready, it cannot be closed",
-                getIdentifier());
-            // Idempotent no-op as per AutoCloseable recommendation
-            return;
-        }
-
-        for (AbstractTransactionContextWrapper contextWrapper : txContextWrappers.values()) {
-            contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-                @Override
-                public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-                    transactionContext.closeTransaction();
-                }
-            });
-        }
-
-
-        txContextWrappers.clear();
-    }
-
-    @Override
-    public final AbstractThreePhaseCommitCohort<?> ready() {
-        checkState(type != TransactionType.READ_ONLY, "Read-only transactions cannot be readied");
-
-        final boolean success = seal(TransactionState.READY);
-        checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
-
-        LOG.debug("Tx {} Readying {} components for commit", getIdentifier(), txContextWrappers.size());
-
-        final AbstractThreePhaseCommitCohort<?> ret;
-        switch (txContextWrappers.size()) {
-            case 0:
-                ret = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
-                break;
-            case 1:
-                final Entry<String, AbstractTransactionContextWrapper> e = Iterables.getOnlyElement(
-                        txContextWrappers.entrySet());
-                ret = createSingleCommitCohort(e.getKey(), e.getValue());
-                break;
-            default:
-                ret = createMultiCommitCohort();
-        }
-
-        txContextFactory.onTransactionReady(getIdentifier(), ret.getCohortFutures());
-
-        final Throwable debugContext = getDebugContext();
-        return debugContext == null ? ret : new DebugThreePhaseCommitCohort(getIdentifier(), ret, debugContext);
-    }
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    private AbstractThreePhaseCommitCohort<?> createSingleCommitCohort(final String shardName,
-            final AbstractTransactionContextWrapper contextWrapper) {
-
-        LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), shardName);
-
-        final OperationCallback.Reference operationCallbackRef =
-                new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK);
-
-        final TransactionContext transactionContext = contextWrapper.getTransactionContext();
-        final Future future;
-        if (transactionContext == null) {
-            final Promise promise = akka.dispatch.Futures.promise();
-            contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-                @Override
-                public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
-                    promise.completeWith(getDirectCommitFuture(newTransactionContext, operationCallbackRef,
-                        havePermit));
-                }
-            });
-            future = promise.future();
-        } else {
-            // avoid the creation of a promise and a TransactionOperation
-            future = getDirectCommitFuture(transactionContext, operationCallbackRef, null);
-        }
-
-        return new SingleCommitCohortProxy(txContextFactory.getActorUtils(), future, getIdentifier(),
-            operationCallbackRef);
-    }
-
-    private Future<?> getDirectCommitFuture(final TransactionContext transactionContext,
-            final OperationCallback.Reference operationCallbackRef, final Boolean havePermit) {
-        TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(
-                txContextFactory.getActorUtils());
-        operationCallbackRef.set(rateLimitingCallback);
-        rateLimitingCallback.run();
-        return transactionContext.directCommit(havePermit);
-    }
-
-    private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
-
-        final List<ThreePhaseCommitCohortProxy.CohortInfo> cohorts = new ArrayList<>(txContextWrappers.size());
-        final Optional<SortedSet<String>> shardNames = Optional.of(new TreeSet<>(txContextWrappers.keySet()));
-        for (Entry<String, AbstractTransactionContextWrapper> e : txContextWrappers.entrySet()) {
-            LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey());
-
-            final AbstractTransactionContextWrapper wrapper = e.getValue();
-
-            // The remote tx version is obtained via the TransactionContext, which may not be available yet, so
-            // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the
-            // TransactionContext is available.
-            cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(shardNames),
-                () -> wrapper.getTransactionContext().getTransactionVersion()));
-        }
-
-        return new ThreePhaseCommitCohortProxy(txContextFactory.getActorUtils(), cohorts, getIdentifier());
-    }
-
-    private String shardNameFromRootChild(final DataContainerChild child) {
-        return shardNameFromIdentifier(YangInstanceIdentifier.create(child.getIdentifier()));
-    }
-
-    private String shardNameFromIdentifier(final YangInstanceIdentifier path) {
-        return getActorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
-    }
-
-    private AbstractTransactionContextWrapper getContextWrapper(final YangInstanceIdentifier path) {
-        return getContextWrapper(shardNameFromIdentifier(path));
-    }
-
-    private AbstractTransactionContextWrapper getContextWrapper(final String shardName) {
-        final AbstractTransactionContextWrapper existing = txContextWrappers.get(shardName);
-        if (existing != null) {
-            return existing;
-        }
-
-        final AbstractTransactionContextWrapper fresh = txContextFactory.newTransactionContextWrapper(this, shardName);
-        txContextWrappers.put(shardName, fresh);
-        return fresh;
-    }
-
-    TransactionType getType() {
-        return type;
-    }
-
-    boolean isReady() {
-        return state != TransactionState.OPEN;
-    }
-
-    final ActorUtils getActorUtils() {
-        return txContextFactory.getActorUtils();
-    }
-}
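A hypothetical driver for the proxy above, illustrating the open, modify, read and ready lifecycle; the factory, path and data parameters (and the blocking get()) are assumptions standing in for a real call site, not part of the removed file.

import com.google.common.util.concurrent.FluentFuture;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

final class TransactionProxyUsage {
    private TransactionProxyUsage() {
        // utility class
    }

    static AbstractThreePhaseCommitCohort<?> writeReadAndReady(final AbstractTransactionContextFactory<?> factory,
            final YangInstanceIdentifier path, final NormalizedNode data)
            throws ExecutionException, InterruptedException {
        final TransactionProxy tx = new TransactionProxy(factory, TransactionType.READ_WRITE);

        // The write is routed to the shard owning 'path'.
        tx.write(path, data);

        // Reads go to the owning shard as well; a root read would be aggregated across all shards.
        final FluentFuture<Optional<NormalizedNode>> read = tx.read(path);
        read.get();

        // Sealing the transaction produces a commit cohort covering every touched shard.
        return tx.ready();
    }
}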
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java
deleted file mode 100644 (file)
index f5eb0e4..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Mapper;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A {@link Mapper} extracting the {@link ActorSelection} pointing to the actor which
- * is backing a particular transaction.
- *
- * <p>
- * This class is not for general consumption. It is public only to support the pre-lithium compatibility
- * package.
- * TODO: once we remove compatibility, make this class package-private and final.
- */
-public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection> {
-    protected static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER = new Mapper<Throwable, Throwable>() {
-        @Override
-        public Throwable apply(final Throwable failure) {
-            return failure;
-        }
-    };
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionReadyReplyMapper.class);
-    private final TransactionIdentifier identifier;
-    private final ActorUtils actorUtils;
-
-    protected TransactionReadyReplyMapper(final ActorUtils actorUtils, final TransactionIdentifier identifier) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.identifier = requireNonNull(identifier);
-    }
-
-    protected final ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    protected String extractCohortPathFrom(final ReadyTransactionReply readyTxReply) {
-        return readyTxReply.getCohortPath();
-    }
-
-    @Override
-    public final ActorSelection checkedApply(final Object serializedReadyReply) {
-        LOG.debug("Tx {} readyTransaction", identifier);
-
-        // At this point the ready operation succeeded and we need to extract the cohort
-        // actor path from the reply.
-        if (ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
-            ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
-            return actorUtils.actorSelection(extractCohortPathFrom(readyTxReply));
-        }
-
-        // Throwing an exception here will fail the Future.
-        throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
-                identifier, serializedReadyReply.getClass()));
-    }
-
-    static Future<ActorSelection> transform(final Future<Object> readyReplyFuture, final ActorUtils actorUtils,
-            final TransactionIdentifier identifier) {
-        return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorUtils, identifier),
-            SAME_FAILURE_TRANSFORMER, actorUtils.getClientDispatcher());
-    }
-}
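A hypothetical use of the transform() helper above; the readyReplyFuture, utils and txId parameters are assumptions standing in for the real call site.

import akka.actor.ActorSelection;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import scala.concurrent.Future;

final class ReadyReplyUsage {
    private ReadyReplyUsage() {
        // utility class
    }

    static Future<ActorSelection> cohortFor(final Future<Object> readyReplyFuture, final ActorUtils utils,
            final TransactionIdentifier txId) {
        // Maps the serialized ReadyTransactionReply onto the ActorSelection of the backing cohort actor.
        return TransactionReadyReplyMapper.transform(readyReplyFuture, utils, txId);
    }
}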
index 649dae528949397b73b1cb8367156e0057f85ba0..19ca628d262666d6e56a63ee37850e4ca137dbf8 100644 (file)
@@ -12,14 +12,12 @@ public enum TransactionType {
     WRITE_ONLY,
     READ_WRITE;
 
-    // Cache all values
-    private static final TransactionType[] VALUES = values();
-
     public static TransactionType fromInt(final int type) {
-        try {
-            return VALUES[type];
-        } catch (IndexOutOfBoundsException e) {
-            throw new IllegalArgumentException("In TransactionType enum value " + type, e);
-        }
+        return switch (type) {
+            case 0 -> READ_ONLY;
+            case 1 -> WRITE_ONLY;
+            case 2 -> READ_WRITE;
+            default -> throw new IllegalArgumentException("In TransactionType enum value " + type);
+        };
     }
 }
\ No newline at end of file
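A small, hedged sanity check for the replacement switch above; it assumes the constants keep their declaration order READ_ONLY, WRITE_ONLY, READ_WRITE so that fromInt() stays aligned with ordinal(). The TransactionTypeCheck class is illustrative only.

final class TransactionTypeCheck {
    public static void main(final String[] args) {
        for (TransactionType type : TransactionType.values()) {
            // fromInt() must round-trip every constant's ordinal.
            if (TransactionType.fromInt(type.ordinal()) != type) {
                throw new AssertionError("fromInt() is out of sync with " + type);
            }
        }
    }
}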
index 33d24156827b599962ff3f8e2f62c14f5fa51792..2fdf3a9d622a927a75272c9f123650cf74689059 100644 (file)
@@ -15,10 +15,11 @@ import akka.actor.PoisonPill;
 import akka.actor.Props;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.concurrent.TimeUnit;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -30,10 +31,9 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
     @VisibleForTesting
     static long killDelay = TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS);
 
-    private ListenerRegistration<?> registration;
-    private Runnable onClose;
+    private SetRegistration registration = null;
+    private Cancellable killSchedule = null;
     private boolean closed;
-    private Cancellable killSchedule;
 
     @Override
     protected void handleReceive(final Object message) {
@@ -42,9 +42,8 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
             if (isValidSender(getSender())) {
                 getSender().tell(CloseDataTreeNotificationListenerRegistrationReply.getInstance(), getSelf());
             }
-        } else if (message instanceof SetRegistration) {
-            registration = ((SetRegistration)message).registration;
-            onClose = ((SetRegistration)message).onClose;
+        } else if (message instanceof SetRegistration setRegistration) {
+            registration = setRegistration;
             if (closed) {
                 closeListenerRegistration();
             }
@@ -55,10 +54,12 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
 
     private void closeListenerRegistration() {
         closed = true;
-        if (registration != null) {
-            registration.close();
-            onClose.run();
+
+        final var reg = registration;
+        if (reg != null) {
             registration = null;
+            reg.registration.close();
+            reg.onClose.run();
 
             if (killSchedule == null) {
                 killSchedule = getContext().system().scheduler().scheduleOnce(FiniteDuration.create(killDelay,
@@ -72,13 +73,11 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
         return Props.create(DataTreeNotificationListenerRegistrationActor.class);
     }
 
-    public static class SetRegistration {
-        private final ListenerRegistration<?> registration;
-        private final Runnable onClose;
-
-        public SetRegistration(final ListenerRegistration<?> registration, final Runnable onClose) {
-            this.registration = requireNonNull(registration);
-            this.onClose = requireNonNull(onClose);
+    @NonNullByDefault
+    public record SetRegistration(Registration registration, Runnable onClose) {
+        public SetRegistration {
+            requireNonNull(registration);
+            requireNonNull(onClose);
         }
     }
 }
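
The rewrite above turns SetRegistration into a record with a compact canonical constructor and switches the message handler to pattern matching for instanceof. A small self-contained sketch of both idioms; AutoCloseable stands in for the Registration type here purely for illustration:

    import static java.util.Objects.requireNonNull;

    final class RegistrationExample {
        // Compact canonical constructor: validation runs before the implicit field assignments.
        record SetRegistration(AutoCloseable registration, Runnable onClose) {
            SetRegistration {
                requireNonNull(registration);
                requireNonNull(onClose);
            }
        }

        static void handle(final Object message) throws Exception {
            // Pattern matching for instanceof binds the narrowed value in the same expression.
            if (message instanceof SetRegistration set) {
                set.registration().close();
                set.onClose().run();
            }
        }
    }
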
index ccaa48b8d5ac6fb828a62cede7d1e175e93a7b4f..5eabe94399188f23b1083452841713e788fc41c5 100644 (file)
@@ -24,16 +24,15 @@ import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier;
 import org.opendaylight.yangtools.yang.data.codec.gson.JSONNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;
 
@@ -99,7 +98,7 @@ public final class JsonExportActor extends AbstractUntypedActor {
         final Path filePath = snapshotDir.resolve(exportSnapshot.id + "-snapshot.json");
         LOG.debug("Creating JSON file : {}", filePath);
 
-        final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter().get();
+        final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter();
         checkState(root instanceof NormalizedNodeContainer, "Unexpected root %s", root);
 
         writeSnapshot(filePath, (NormalizedNodeContainer<?>) root);
@@ -124,8 +123,7 @@ public final class JsonExportActor extends AbstractUntypedActor {
         try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
             jsonWriter.beginObject();
 
-            try (NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(
-                JSONNormalizedNodeStreamWriter.createNestedWriter(
+            try (var nnWriter = NormalizedNodeWriter.forStreamWriter(JSONNormalizedNodeStreamWriter.createNestedWriter(
                     JSONCodecFactorySupplier.RFC7951.getShared(schemaContext),
                     SchemaInferenceStack.of(schemaContext).toInference(), null, jsonWriter),
                 true)) {
@@ -144,11 +142,10 @@ public final class JsonExportActor extends AbstractUntypedActor {
         try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
             jsonWriter.beginObject().name("Entries");
             jsonWriter.beginArray();
-            for (ReplicatedLogEntry entry : entries) {
-                final Payload data = entry.getData();
-                if (data instanceof CommitTransactionPayload) {
-                    final CommitTransactionPayload payload = (CommitTransactionPayload) entry.getData();
-                    final DataTreeCandidate candidate = payload.getCandidate().getValue().getCandidate();
+            for (var entry : entries) {
+                final var data = entry.getData();
+                if (data instanceof CommitTransactionPayload payload) {
+                    final var candidate = payload.getCandidate().candidate();
                     writeNode(jsonWriter, candidate);
                 } else {
                     jsonWriter.beginObject().name("Payload").value(data.toString()).endObject();
@@ -162,21 +159,18 @@ public final class JsonExportActor extends AbstractUntypedActor {
     }
 
     private static void writeNode(final JsonWriter writer, final DataTreeCandidate candidate) throws IOException {
-        writer.beginObject();
-        writer.name("Entry");
-        writer.beginArray();
+        writer.beginObject().name("Entry").beginArray();
         doWriteNode(writer, candidate.getRootPath(), candidate.getRootNode());
-        writer.endArray();
-        writer.endObject();
+        writer.endArray().endObject();
     }
 
     private static void doWriteNode(final JsonWriter writer, final YangInstanceIdentifier path,
             final DataTreeCandidateNode node) throws IOException {
-        switch (node.getModificationType()) {
+        switch (node.modificationType()) {
             case APPEARED:
             case DISAPPEARED:
             case SUBTREE_MODIFIED:
-                NodeIterator iterator = new NodeIterator(null, path, node.getChildNodes().iterator());
+                NodeIterator iterator = new NodeIterator(null, path, node.childNodes().iterator());
                 do {
                     iterator = iterator.next(writer);
                 } while (iterator != null);
@@ -193,14 +187,14 @@ public final class JsonExportActor extends AbstractUntypedActor {
 
     private static void outputNodeInfo(final JsonWriter writer, final YangInstanceIdentifier path,
                                        final DataTreeCandidateNode node) throws IOException {
-        final ModificationType modificationType = node.getModificationType();
+        final ModificationType modificationType = node.modificationType();
 
         writer.beginObject().name("Node");
         writer.beginArray();
         writer.beginObject().name("Path").value(path.toString()).endObject();
         writer.beginObject().name("ModificationType").value(modificationType.toString()).endObject();
         if (modificationType == ModificationType.WRITE) {
-            writer.beginObject().name("Data").value(node.getDataAfter().get().body().toString()).endObject();
+            writer.beginObject().name("Data").value(node.getDataAfter().body().toString()).endObject();
         }
         writer.endArray();
         writer.endObject();
@@ -212,7 +206,7 @@ public final class JsonExportActor extends AbstractUntypedActor {
         writer.beginArray();
         writer.beginObject().name("Path").value(path.toString()).endObject();
         writer.beginObject().name("ModificationType")
-                .value("UNSUPPORTED MODIFICATION: " + node.getModificationType()).endObject();
+                .value("UNSUPPORTED MODIFICATION: " + node.modificationType()).endObject();
         writer.endArray();
         writer.endObject();
     }
@@ -239,14 +233,14 @@ public final class JsonExportActor extends AbstractUntypedActor {
 
         NodeIterator next(final JsonWriter writer) throws IOException {
             while (iterator.hasNext()) {
-                final DataTreeCandidateNode node = iterator.next();
-                final YangInstanceIdentifier child = path.node(node.getIdentifier());
+                final var node = iterator.next();
+                final var child = path.node(node.name());
 
-                switch (node.getModificationType()) {
+                switch (node.modificationType()) {
                     case APPEARED:
                     case DISAPPEARED:
                     case SUBTREE_MODIFIED:
-                        return new NodeIterator(this, child, node.getChildNodes().iterator());
+                        return new NodeIterator(this, child, node.childNodes().iterator());
                     case DELETE:
                     case UNMODIFIED:
                     case WRITE:
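
The JsonExportActor hunks above collapse consecutive JsonWriter calls into fluent chains; Gson's JsonWriter returns itself from each call, so the chained and unchained forms produce identical output. A minimal sketch of the chained style with made-up field values:

    import com.google.gson.stream.JsonWriter;
    import java.io.IOException;
    import java.io.StringWriter;

    final class JsonWriterChaining {
        static String writeEntry(final String path, final String modification) throws IOException {
            final var out = new StringWriter();
            try (var writer = new JsonWriter(out)) {
                // Every call returns the same JsonWriter, so one chain expresses the whole structure.
                writer.beginObject().name("Entry").beginArray()
                    .beginObject().name("Path").value(path).endObject()
                    .beginObject().name("ModificationType").value(modification).endObject()
                    .endArray().endObject();
            }
            return out.toString();
        }
    }
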
index d7d380830fd81b34502fe606ce3ca1e63da1113a..91da59d3bae91faf6628abb3b7853cd2ab042e5f 100644 (file)
@@ -76,7 +76,7 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
     private void onSerializeSnapshot(final SerializeSnapshot request) {
         Optional<OutputStream> installSnapshotStream = request.getInstallSnapshotStream();
         if (installSnapshotStream.isPresent()) {
-            try (ObjectOutputStream out = getOutputStream(installSnapshotStream.get())) {
+            try (ObjectOutputStream out = getOutputStream(installSnapshotStream.orElseThrow())) {
                 request.getSnapshot().serialize(out);
             } catch (IOException e) {
                 // TODO - we should communicate the failure in the CaptureSnapshotReply.
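
The one-line change above prefers Optional.orElseThrow() over Optional.get(). Both throw NoSuchElementException on an empty Optional, but orElseThrow() states that behaviour explicitly and avoids the usual "get() without isPresent()" warnings. A tiny sketch:

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import java.util.Optional;

    final class OptionalUnwrap {
        static OutputStream unwrap(final Optional<OutputStream> maybeStream) {
            // Identical to get() on a present value, but reads as an intentional unwrap.
            return maybeStream.orElseThrow();
        }

        public static void main(final String[] args) {
            System.out.println(unwrap(Optional.of(new ByteArrayOutputStream())));
        }
    }
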
index 4325c7f268a0fb7c6cc0d8edf0d983648d0d19d7..70f2ccb69f9fed8f017cb525046e557203b640bb 100644 (file)
@@ -19,6 +19,7 @@ public interface Configuration {
     /**
      * Returns all the shard names that belong on the member by the given name.
      */
+    // FIXME: return Set here
     @NonNull Collection<String> getMemberShardNames(@NonNull MemberName memberName);
 
     /**
@@ -34,6 +35,7 @@ public interface Configuration {
     /**
      * Returns the member replicas for the given shard name.
      */
+    // FIXME: return Set here
     @NonNull Collection<MemberName> getMembersFromShardName(@NonNull String shardName);
 
     /**
@@ -54,6 +56,7 @@ public interface Configuration {
     /**
      * Returns a unique set of all member names configured for all shards.
      */
+    // FIXME: return Set here
     Collection<MemberName> getUniqueMemberNamesForAllShards();
 
     /*
index 1ede88c3cb91c74bbfac078937e1b99868859aed..d0e8d875f65d4ba1f765a969c85107c156c3f5bc 100644 (file)
@@ -11,18 +11,20 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 
+// FIXME: Non-final for testing
 public class ConfigurationImpl implements Configuration {
     private volatile Map<String, ModuleConfig> moduleConfigMap;
 
@@ -35,16 +37,17 @@ public class ConfigurationImpl implements Configuration {
         this(new FileModuleShardConfigProvider(moduleShardsConfigPath, modulesConfigPath));
     }
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Subclassed for testing")
     public ConfigurationImpl(final ModuleShardConfigProvider provider) {
         ImmutableMap.Builder<String, ModuleConfig> mapBuilder = ImmutableMap.builder();
-        for (Map.Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
+        for (Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
             mapBuilder.put(e.getKey(), e.getValue().build());
         }
 
-        this.moduleConfigMap = mapBuilder.build();
+        moduleConfigMap = mapBuilder.build();
 
-        this.allShardNames = createAllShardNames(moduleConfigMap.values());
-        this.namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
+        allShardNames = createAllShardNames(moduleConfigMap.values());
+        namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
     }
 
     private static Set<String> createAllShardNames(final Iterable<ModuleConfig> moduleConfigs) {
@@ -121,7 +124,7 @@ public class ConfigurationImpl implements Configuration {
             }
         }
 
-        return Collections.emptyList();
+        return List.of();
     }
 
     private static void checkNotNullShardName(final String shardName) {
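
The ConfigurationImpl hunk above swaps Collections.emptyList() for List.of() and drops redundant this. qualifiers on field assignments. A brief sketch of the empty-collection idiom, with hypothetical method and map names:

    import java.util.List;
    import java.util.Map;

    final class EmptyDefaults {
        static List<String> membersFor(final Map<String, List<String>> shardMembers, final String shardName) {
            final var members = shardMembers.get(shardName);
            // List.of() returns a shared immutable empty list, same semantics as Collections.emptyList().
            return members != null ? members : List.of();
        }
    }
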
index 7c206adc5859cc5117894ee72eb6e5bb1f62db9a..74d100540a1f807807db62578a1ab3f397b269e2 100644 (file)
@@ -52,19 +52,8 @@ public class ShardIdentifier {
             return false;
         }
 
-        ShardIdentifier that = (ShardIdentifier) obj;
-
-        if (!memberName.equals(that.memberName)) {
-            return false;
-        }
-        if (!shardName.equals(that.shardName)) {
-            return false;
-        }
-        if (!type.equals(that.type)) {
-            return false;
-        }
-
-        return true;
+        final var that = (ShardIdentifier) obj;
+        return memberName.equals(that.memberName) && shardName.equals(that.shardName) && type.equals(that.type);
     }
 
     @Override
@@ -103,17 +92,17 @@ public class ShardIdentifier {
         }
 
         public Builder shardName(final String newShardName) {
-            this.shardName = newShardName;
+            shardName = newShardName;
             return this;
         }
 
         public Builder memberName(final MemberName newMemberName) {
-            this.memberName = newMemberName;
+            memberName = newMemberName;
             return this;
         }
 
         public Builder type(final String newType) {
-            this.type = newType;
+            type = newType;
             return this;
         }
 
index 880ba99dbd6f1dcf1be16abd106b1d0dfb37d530..bb47e7c838b58da54a0962d57e147322ec855d00 100644 (file)
@@ -11,26 +11,19 @@ package org.opendaylight.controller.cluster.datastore.identifiers;
 public class ShardManagerIdentifier {
     private final String type;
 
-    public ShardManagerIdentifier(String type) {
+    public ShardManagerIdentifier(final String type) {
         this.type = type;
     }
 
     @Override
-    public boolean equals(Object obj) {
+    public boolean equals(final Object obj) {
         if (this == obj) {
             return true;
         }
         if (obj == null || getClass() != obj.getClass()) {
             return false;
         }
-
-        ShardManagerIdentifier that = (ShardManagerIdentifier) obj;
-
-        if (!type.equals(that.type)) {
-            return false;
-        }
-
-        return true;
+        return type.equals(((ShardManagerIdentifier) obj).type);
     }
 
     @Override
@@ -49,14 +42,13 @@ public class ShardManagerIdentifier {
     public static class Builder {
         private String type;
 
-        public Builder type(String newType) {
-            this.type = newType;
+        public Builder type(final String newType) {
+            type = newType;
             return this;
         }
 
         public ShardManagerIdentifier build() {
-            return new ShardManagerIdentifier(this.type);
+            return new ShardManagerIdentifier(type);
         }
-
     }
 }
index 25c13989d599a153a3680d78336f4662984a894f..d0f1d3e7e1096cbb9496d789a153bfe0326e8076 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class AbortTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public AbortTransaction() {
     }
 
-    public AbortTransaction(TransactionIdentifier transactionID, final short version) {
+    public AbortTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static AbortTransaction fromSerializable(Object serializable) {
+    public static AbortTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof AbortTransaction);
         return (AbortTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof AbortTransaction;
     }
 }
index 3b58458e1a3dca93fd37ba97d01a05a64c26cc65..911d8cf0589f4fe9e4c5a3e7ff16d239d2f3131c 100644 (file)
@@ -5,26 +5,28 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class AbortTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = 7251132353204199793L;
     private static final AbortTransactionReply INSTANCE = new AbortTransactionReply();
 
     public AbortTransactionReply() {
     }
 
-    private AbortTransactionReply(short version) {
+    private AbortTransactionReply(final short version) {
         super(version);
     }
 
-    public static AbortTransactionReply instance(short version) {
+    public static AbortTransactionReply instance(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new AbortTransactionReply(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof AbortTransactionReply;
     }
 }
index dd4c9b8b01f207cb4dfba7792c590b2e47a1d24a..00aa7fa64b819f9662679e25d27b6954b6a630f3 100644 (file)
@@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * @author gwu
  *
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractRead<T> extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
index 3b45e642cadddfe36c853d3376ad1909133f86e5..6296c280a2a292af329018eed47282cb17e46930 100644 (file)
@@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
index 09c5b739cf014791b099eab69bc7c73c314ec561..cd6e0d8cfa91da756f616e8c5244465feb7e1e6b 100644 (file)
@@ -7,11 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import java.io.Serializable;
+import static java.util.Objects.requireNonNull;
 
-public class ActorInitialized implements Serializable {
-    private static final long serialVersionUID = 1L;
+import akka.actor.ActorRef;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 
-    public ActorInitialized() {
+@NonNullByDefault
+public record ActorInitialized(ActorRef actorRef) {
+    public ActorInitialized {
+        requireNonNull(actorRef);
     }
 }
index 77d2687ccba2a723dd5041891c8c1d8b4dbde566..4e7b40ab1f1393cc762ee636479cce2168969c3b 100644 (file)
@@ -26,7 +26,9 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi
  *
  * @author Thomas Pantelis
  */
-public class BatchedModifications extends MutableCompositeModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModifications extends MutableCompositeModification {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private boolean ready;
@@ -39,7 +41,7 @@ public class BatchedModifications extends MutableCompositeModification {
     public BatchedModifications() {
     }
 
-    public BatchedModifications(TransactionIdentifier transactionId, short version) {
+    public BatchedModifications(final TransactionIdentifier transactionId, final short version) {
         super(version);
         this.transactionId = requireNonNull(transactionId, "transactionID can't be null");
     }
@@ -48,10 +50,10 @@ public class BatchedModifications extends MutableCompositeModification {
         return ready;
     }
 
-    public void setReady(Optional<SortedSet<String>> possibleParticipatingShardNames) {
-        this.ready = true;
-        this.participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
-        Preconditions.checkArgument(this.participatingShardNames == null || this.participatingShardNames.size() > 1);
+    public void setReady(final Optional<SortedSet<String>> possibleParticipatingShardNames) {
+        ready = true;
+        participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
+        Preconditions.checkArgument(participatingShardNames == null || participatingShardNames.size() > 1);
     }
 
     public void setReady() {
@@ -66,7 +68,7 @@ public class BatchedModifications extends MutableCompositeModification {
         return doCommitOnReady;
     }
 
-    public void setDoCommitOnReady(boolean doCommitOnReady) {
+    public void setDoCommitOnReady(final boolean doCommitOnReady) {
         this.doCommitOnReady = doCommitOnReady;
     }
 
@@ -74,7 +76,7 @@ public class BatchedModifications extends MutableCompositeModification {
         return totalMessagesSent;
     }
 
-    public void setTotalMessagesSent(int totalMessagesSent) {
+    public void setTotalMessagesSent(final int totalMessagesSent) {
         this.totalMessagesSent = totalMessagesSent;
     }
 
@@ -83,7 +85,7 @@ public class BatchedModifications extends MutableCompositeModification {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
         ready = in.readBoolean();
@@ -104,7 +106,7 @@ public class BatchedModifications extends MutableCompositeModification {
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
         out.writeBoolean(ready);
@@ -114,7 +116,7 @@ public class BatchedModifications extends MutableCompositeModification {
         if (getVersion() >= DataStoreVersions.FLUORINE_VERSION) {
             if (participatingShardNames != null) {
                 out.writeInt(participatingShardNames.size());
-                for (String shardName: participatingShardNames) {
+                for (String shardName : participatingShardNames) {
                     out.writeObject(shardName);
                 }
             } else {
index 29bb3e9ea623b77f6bc2dc7559af02828a883ae6..0cca8d03ffa7f4cc5f9d78fbed7698c6fab23446 100644 (file)
@@ -16,7 +16,9 @@ import java.io.ObjectOutput;
  *
  * @author Thomas Pantelis
  */
-public class BatchedModificationsReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModificationsReply extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private int numBatched;
@@ -24,7 +26,7 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage {
     public BatchedModificationsReply() {
     }
 
-    public BatchedModificationsReply(int numBatched) {
+    public BatchedModificationsReply(final int numBatched) {
         this.numBatched = numBatched;
     }
 
@@ -33,13 +35,13 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         numBatched = in.readInt();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeInt(numBatched);
     }
index 087c7b6376bd44760b8486804b99cc157581d594..f50412fc0e3b3765e279dc06aeffa3da31178782 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CanCommitTransaction() {
     }
 
-    public CanCommitTransaction(TransactionIdentifier transactionID, final short version) {
+    public CanCommitTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static CanCommitTransaction fromSerializable(Object serializable) {
+    public static CanCommitTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof CanCommitTransaction);
         return (CanCommitTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CanCommitTransaction;
     }
 }
index f346cba4334ffce9b93f1ff53e33b9e0401d94fa..5c8fae94b8ee005444f44fbaa3d61d8057dd0e94 100644 (file)
@@ -14,7 +14,11 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class CanCommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = 4355566635423934872L;
+
     private static final CanCommitTransactionReply YES =
             new CanCommitTransactionReply(true, DataStoreVersions.CURRENT_VERSION);
     private static final CanCommitTransactionReply NO =
@@ -35,13 +39,13 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         canCommit = in.readBoolean();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeBoolean(canCommit);
     }
@@ -51,11 +55,11 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
         return "CanCommitTransactionReply [canCommit=" + canCommit + ", version=" + getVersion() + "]";
     }
 
-    public static CanCommitTransactionReply yes(short version) {
+    public static CanCommitTransactionReply yes(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? YES : new CanCommitTransactionReply(true, version);
     }
 
-    public static CanCommitTransactionReply no(short version) {
+    public static CanCommitTransactionReply no(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? NO : new CanCommitTransactionReply(false, version);
     }
 
@@ -64,7 +68,7 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
         return (CanCommitTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CanCommitTransactionReply;
     }
 }
index 3b5c6b3b8c89e81e0df5d0aac2c283f17ce893b9..327dca0e801ef001cc10643b39e61ea6843c582a 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class CloseDataTreeNotificationListenerRegistration implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final CloseDataTreeNotificationListenerRegistration INSTANCE =
             new CloseDataTreeNotificationListenerRegistration();
@@ -21,6 +22,7 @@ public final class CloseDataTreeNotificationListenerRegistration implements Seri
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
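
The hunks above annotate serialVersionUID and readResolve() with @java.io.Serial, which is intended to let tooling verify that these members really take part in the serialization mechanism. A standalone sketch of the serializable-singleton idiom those classes follow:

    import java.io.Serial;
    import java.io.Serializable;

    final class SingletonReply implements Serializable {
        @Serial
        private static final long serialVersionUID = 1L;

        static final SingletonReply INSTANCE = new SingletonReply();

        private SingletonReply() {
            // Stateless; one shared instance is enough.
        }

        @Serial
        private Object readResolve() {
            // Deserialization would otherwise mint a second instance; redirect it to the singleton.
            return INSTANCE;
        }
    }
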
index 0bc5254c1459a47dc19bb2e505738558168d5e69..ae825106ad36e56cbb677c78c089d621559d79cb 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class CloseDataTreeNotificationListenerRegistrationReply implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final CloseDataTreeNotificationListenerRegistrationReply INSTANCE =
             new CloseDataTreeNotificationListenerRegistrationReply();
@@ -22,6 +23,7 @@ public final class CloseDataTreeNotificationListenerRegistrationReply implements
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
index 1a3567cafa0c2989490b78f037555347bd6701b1..5b3c050e4b17a755da8e44ecab7ab9833891b0db 100644 (file)
@@ -5,20 +5,21 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-public class CloseTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransaction extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CloseTransaction() {
     }
 
-    public CloseTransaction(short version) {
+    public CloseTransaction(final short version) {
         super(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CloseTransaction;
     }
 }
index 1e96286eb984bc84e3fb0d7e63f4edb65847d718..d06b7319b4433bbf326cfc5e1ace2602c96335e3 100644 (file)
@@ -16,8 +16,10 @@ import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
-public class CloseTransactionChain extends VersionedExternalizableMessage
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransactionChain extends VersionedExternalizableMessage
         implements Identifiable<LocalHistoryIdentifier> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private LocalHistoryIdentifier transactionChainId;
index 0e21b578ca91f59cc814f62f7c1ffb7d02e0c028..a746580516e76f12fb91f6cb57291f6f135332f5 100644 (file)
@@ -5,9 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
index fe13e5d8b177439cc31e1e72520a3bfb2e5868ef..bd80287ae9c187bdb8e2d30127cd0b74adac8654 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CommitTransaction() {
     }
 
-    public CommitTransaction(TransactionIdentifier transactionID, final short version) {
+    public CommitTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static CommitTransaction fromSerializable(Object serializable) {
+    public static CommitTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof CommitTransaction);
         return (CommitTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CommitTransaction;
     }
 }
index cd3a13a9f7a8d8a4948cc5d0b1d3395c45b4e44b..167124c6fe672258632e1ebe84ca36cf1b513832 100644 (file)
@@ -5,26 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class CommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = -8342450250867395000L;
+
     public static final CommitTransactionReply INSTANCE = new CommitTransactionReply();
 
     public CommitTransactionReply() {
     }
 
-    private CommitTransactionReply(short version) {
+    private CommitTransactionReply(final short version) {
         super(version);
     }
 
-    public static CommitTransactionReply instance(short version) {
+    public static CommitTransactionReply instance(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new CommitTransactionReply(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CommitTransactionReply;
     }
 }
index 3283a55f438a77644fe35ad403436a2bfdac58f2..5ef056e8a0cb7127db01c680230564abc216e6bd 100644 (file)
@@ -15,7 +15,9 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CreateTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransaction extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private TransactionIdentifier transactionId;
index 87dd7c57fb5d8c64cea1d950bc75ec976ab989ab..644daf21fbdc4b158946f9785f1f4f96dcf28b7b 100644 (file)
@@ -15,7 +15,9 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CreateTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private String transactionPath;
@@ -40,14 +42,14 @@ public class CreateTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
         transactionPath = in.readUTF();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
         out.writeUTF(transactionPath);
@@ -60,12 +62,12 @@ public class CreateTransactionReply extends VersionedExternalizableMessage {
                 + ", version=" + getVersion() + "]";
     }
 
-    public static CreateTransactionReply fromSerializable(Object serializable) {
+    public static CreateTransactionReply fromSerializable(final Object serializable) {
         checkArgument(serializable instanceof CreateTransactionReply);
         return (CreateTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CreateTransactionReply;
     }
 }
index b7e38d50824095a4f0e31c094272593ef704441c..6c646f7cc3dd630223287d5cef2dfca6ba959e27 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
@@ -15,6 +14,7 @@ import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExists extends AbstractRead<Boolean> {
     private static final long serialVersionUID = 1L;
 
@@ -26,12 +26,12 @@ public class DataExists extends AbstractRead<Boolean> {
     }
 
     @Override
-    public FluentFuture<Boolean> apply(DOMStoreReadTransaction readDelegate) {
+    public FluentFuture<Boolean> apply(final DOMStoreReadTransaction readDelegate) {
         return readDelegate.exists(getPath());
     }
 
     @Override
-    public void processResponse(Object response, SettableFuture<Boolean> returnFuture) {
+    public void processResponse(final Object response, final SettableFuture<Boolean> returnFuture) {
         if (DataExistsReply.isSerializedType(response)) {
             returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists()));
         } else {
@@ -41,7 +41,7 @@ public class DataExists extends AbstractRead<Boolean> {
     }
 
     @Override
-    protected AbstractRead<Boolean> newInstance(short withVersion) {
+    protected AbstractRead<Boolean> newInstance(final short withVersion) {
         return new DataExists(getPath(), withVersion);
     }
 
@@ -50,7 +50,7 @@ public class DataExists extends AbstractRead<Boolean> {
         return (DataExists)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof DataExists;
     }
 }
index a57df0ecdab14a3bf3400a14c5efaef6d29b34b1..799cd8b86e3d8d2f1903a753dcdbd1e3586b9131 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
@@ -13,6 +12,7 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -31,13 +31,13 @@ public class DataExistsReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         exists = in.readBoolean();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeBoolean(exists);
     }
@@ -47,7 +47,7 @@ public class DataExistsReply extends VersionedExternalizableMessage {
         return (DataExistsReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof DataExistsReply;
     }
 }
index 03ae771ca1a0f0ad43c2b9ccf52e062c095aa234..63f39519f5a35e7b2d63f528c8bc7f157782d90d 100644 (file)
@@ -9,8 +9,8 @@ package org.opendaylight.controller.cluster.datastore.messages;
 
 import static java.util.Objects.requireNonNull;
 
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import java.util.List;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * A message about a DataTree having been changed. The message is not
@@ -19,9 +19,9 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
  * candidate.
  */
 public final class DataTreeChanged {
-    private final Collection<DataTreeCandidate> changes;
+    private final List<DataTreeCandidate> changes;
 
-    public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
+    public DataTreeChanged(final List<DataTreeCandidate> changes) {
         this.changes = requireNonNull(changes);
     }
 
@@ -30,7 +30,7 @@ public final class DataTreeChanged {
      *
      * @return Change events
      */
-    public Collection<DataTreeCandidate> getChanges() {
+    public List<DataTreeCandidate> getChanges() {
         return changes;
     }
 }
index d50079e6a12f7bb3f702c3092df5ee1a3ec13711..06e898e09a66062cd59bd0be81bcc388813f11ee 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class DataTreeChangedReply implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
 
@@ -21,6 +22,7 @@ public final class DataTreeChangedReply implements Serializable {
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java
deleted file mode 100644 (file)
index c7ee83a..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Externalizable with no data.
- *
- * @author Thomas Pantelis
- */
-public class EmptyExternalizable implements Externalizable {
-
-    @Override
-    public void readExternal(ObjectInput in) {
-    }
-
-    @Override
-    public void writeExternal(ObjectOutput out) {
-    }
-}
index 2042e49d654cf75f08a320ecb8626fc6cd5b348e..fbc66a4d057099c22ca7c77ae79eb261ccf3e562 100644 (file)
@@ -20,16 +20,17 @@ import org.opendaylight.controller.cluster.datastore.ReadWriteShardDataTreeTrans
  *
  * @author Thomas Pantelis
  */
-public class ForwardedReadyTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ForwardedReadyTransaction {
     private final TransactionIdentifier transactionId;
     private final ReadWriteShardDataTreeTransaction transaction;
     private final boolean doImmediateCommit;
     private final short txnClientVersion;
-    private @Nullable final SortedSet<String> participatingShardNames;
+    private final @Nullable SortedSet<String> participatingShardNames;
 
-    public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion,
-            ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit,
-            Optional<SortedSet<String>> participatingShardNames) {
+    public ForwardedReadyTransaction(final TransactionIdentifier transactionId, final short txnClientVersion,
+            final ReadWriteShardDataTreeTransaction transaction, final boolean doImmediateCommit,
+            final Optional<SortedSet<String>> participatingShardNames) {
         this.transactionId = requireNonNull(transactionId);
         this.transaction = requireNonNull(transaction);
         this.txnClientVersion = txnClientVersion;
index f1e7fb70b5b37fb8eede14673c3fdcbdca2a92f5..30ac1a9eb73c27f0eab09e416b13772d32c81fcc 100644 (file)
@@ -14,6 +14,7 @@ import org.eclipse.jdt.annotation.NonNull;
  * Request a shard to report the clients it knows about. Shard is required to respond with {@link GetKnownClientsReply}.
  */
 public final class GetKnownClients implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public static final @NonNull GetKnownClients INSTANCE = new GetKnownClients();
@@ -22,6 +23,7 @@ public final class GetKnownClients implements Serializable {
 
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
index e4b9174f1eb9354cd6f39c61a3eec697b8b82dfe..fc0df4a951bcba4bb80c266c1cf8b3946abbe621 100644 (file)
@@ -11,7 +11,7 @@ import static java.util.Objects.requireNonNull;
 
 import org.apache.commons.lang3.ObjectUtils;
 import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Local message sent in reply to FindPrimaryShard to indicate the primary shard is local to the caller.
@@ -19,11 +19,11 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
  * @author Thomas Pantelis
  */
 public class LocalPrimaryShardFound {
-
     private final String primaryPath;
     private final ReadOnlyDataTree localShardDataTree;
 
-    public LocalPrimaryShardFound(@NonNull  String primaryPath, @NonNull ReadOnlyDataTree localShardDataTree) {
+    public LocalPrimaryShardFound(final @NonNull String primaryPath,
+            final @NonNull ReadOnlyDataTree localShardDataTree) {
         this.primaryPath = requireNonNull(primaryPath);
         this.localShardDataTree = requireNonNull(localShardDataTree);
     }
index 1ca06216dd1157a54727ae67c8c8124d2fc0bafa..c9d10a62e60a4061b5d5dc3eb4891415907adce6 100644 (file)
@@ -12,7 +12,7 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.ActorSelection;
 import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Local message DTO that contains information about the primary shard.
@@ -24,17 +24,17 @@ public class PrimaryShardInfo {
     private final short primaryShardVersion;
     private final ReadOnlyDataTree localShardDataTree;
 
-    public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion,
-            @NonNull ReadOnlyDataTree localShardDataTree) {
+    public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion,
+            final @NonNull ReadOnlyDataTree localShardDataTree) {
         this.primaryShardActor = requireNonNull(primaryShardActor);
         this.primaryShardVersion = primaryShardVersion;
         this.localShardDataTree = requireNonNull(localShardDataTree);
     }
 
-    public PrimaryShardInfo(@NonNull ActorSelection primaryShardActor, short primaryShardVersion) {
+    public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion) {
         this.primaryShardActor = requireNonNull(primaryShardActor);
         this.primaryShardVersion = primaryShardVersion;
-        this.localShardDataTree = null;
+        localShardDataTree = null;
     }
 
     /**
index 1113a854bfda89e457bf9cd17c736d9e2f8159fa..8172d64d52ccba8fa75a1b48054d057702e95355 100644 (file)
@@ -16,6 +16,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadData extends AbstractRead<Optional<NormalizedNode>> {
     private static final long serialVersionUID = 1L;
 
index 099ca228cd4c4bc5eebc046686e731da1a0fad6d..2ed53ad0501a81e6ce9615fa318f4fa7a3fbb1c3 100644 (file)
@@ -13,6 +13,7 @@ import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
index d2957a4a54ae51e9de8dd8a03ae27b45437947ee..55ab132db699e276cc0370196fa099a10ec6cea4 100644 (file)
@@ -14,7 +14,7 @@ import java.util.SortedSet;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Message notifying the shard leader to apply modifications which have been
@@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
  * to be sent out to a remote system, it needs to be intercepted by {@link ReadyLocalTransactionSerializer}
  * and turned into {@link BatchedModifications}.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public final class ReadyLocalTransaction {
     private final DataTreeModification modification;
     private final TransactionIdentifier transactionId;
@@ -33,7 +34,7 @@ public final class ReadyLocalTransaction {
     private short remoteVersion = DataStoreVersions.CURRENT_VERSION;
 
     public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification,
-            final boolean doCommitOnReady, Optional<SortedSet<String>> participatingShardNames) {
+            final boolean doCommitOnReady, final Optional<SortedSet<String>> participatingShardNames) {
         this.transactionId = requireNonNull(transactionId);
         this.modification = requireNonNull(modification);
         this.doCommitOnReady = doCommitOnReady;
@@ -56,7 +57,7 @@ public final class ReadyLocalTransaction {
         return remoteVersion;
     }
 
-    public void setRemoteVersion(short remoteVersion) {
+    public void setRemoteVersion(final short remoteVersion) {
         this.remoteVersion = remoteVersion;
     }
 
index c44f9105c0c093e16005b13e888d37bf64a5b861..7346a8c07e5d3401610c1f5a237cd6c026af33e6 100644 (file)
@@ -24,8 +24,8 @@ import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModifi
  * into akka serialization to allow forwarding of ReadyLocalTransaction to remote
  * shards.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public final class ReadyLocalTransactionSerializer extends JSerializer {
-
     private final ExtendedActorSystem system;
 
     public ReadyLocalTransactionSerializer(final ExtendedActorSystem system) {
index 5ddc77f8f624c15f2e75dcebf3adbc693cebfde4..a341c72333e3ca92c123ef749bd9a9ab9f6226d5 100644 (file)
@@ -13,6 +13,7 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyTransactionReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -21,11 +22,11 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage {
     public ReadyTransactionReply() {
     }
 
-    public ReadyTransactionReply(String cohortPath) {
+    public ReadyTransactionReply(final String cohortPath) {
         this(cohortPath, DataStoreVersions.CURRENT_VERSION);
     }
 
-    public ReadyTransactionReply(String cohortPath, short version) {
+    public ReadyTransactionReply(final String cohortPath, final short version) {
         super(version);
         this.cohortPath = cohortPath;
     }
@@ -35,22 +36,22 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         cohortPath = in.readUTF();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeUTF(cohortPath);
     }
 
-    public static ReadyTransactionReply fromSerializable(Object serializable) {
+    public static ReadyTransactionReply fromSerializable(final Object serializable) {
         return (ReadyTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof ReadyTransactionReply;
     }
 }
index cbf2cf9e0fb189d5b2c3ea2e090dca6dd29d111a..c92670c97138c66060be1d149a0f15a213f4828c 100644 (file)
@@ -9,11 +9,10 @@ package org.opendaylight.controller.cluster.datastore.messages;
 
 import static java.util.Objects.requireNonNull;
 
-import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * A local message derived from LeaderStateChanged containing additional Shard-specific info that is sent
@@ -22,22 +21,22 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
  *
  * @author Thomas Pantelis
  */
-public class ShardLeaderStateChanged extends LeaderStateChanged {
-    private final ReadOnlyDataTree localShardDataTree;
+public final class ShardLeaderStateChanged extends LeaderStateChanged {
+    private final @Nullable ReadOnlyDataTree localShardDataTree;
 
-    public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId,
-            @NonNull ReadOnlyDataTree localShardDataTree, short leaderPayloadVersion) {
+    public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final @NonNull ReadOnlyDataTree localShardDataTree, final short leaderPayloadVersion) {
         super(memberId, leaderId, leaderPayloadVersion);
         this.localShardDataTree = requireNonNull(localShardDataTree);
     }
 
-    public ShardLeaderStateChanged(@NonNull String memberId, @Nullable String leaderId,
-            short leaderPayloadVersion) {
+    public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final short leaderPayloadVersion) {
         super(memberId, leaderId, leaderPayloadVersion);
-        this.localShardDataTree = null;
+        localShardDataTree = null;
     }
 
-    public @NonNull Optional<ReadOnlyDataTree> getLocalShardDataTree() {
-        return Optional.ofNullable(localShardDataTree);
+    public @Nullable ReadOnlyDataTree localShardDataTree() {
+        return localShardDataTree;
     }
 }
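
The hunk above replaces the Optional-returning getLocalShardDataTree() accessor with a @Nullable localShardDataTree() accessor. As a rough caller-side sketch of that migration, consider the hypothetical handler below; the class and its onLocalDataTree() helper are invented for illustration and are not part of this change.

    import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
    import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;

    // Hypothetical caller: the Optional handling becomes a plain null check.
    final class LeaderChangeHandler {
        void onLeaderStateChanged(final ShardLeaderStateChanged message) {
            // Before: message.getLocalShardDataTree().ifPresent(this::onLocalDataTree);
            final ReadOnlyDataTree dataTree = message.localShardDataTree();
            if (dataTree != null) {
                onLocalDataTree(dataTree);
            }
        }

        private void onLocalDataTree(final ReadOnlyDataTree dataTree) {
            // A real caller would cache or inspect the local shard's data tree here.
        }
    }
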
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java
deleted file mode 100644 (file)
index 4f74bda..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Serializable;
-
-/**
- * A reply message indicating success.
- *
- * @author Thomas Pantelis
- */
-public final class SuccessReply implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    public static final SuccessReply INSTANCE = new SuccessReply();
-
-    private SuccessReply() {
-    }
-}
index ad32ecfb69f73910b9e3baef4d4a91839bfff783..16e59cc2338f5f98c7714c1aca325a9c3711070c 100644 (file)
@@ -7,11 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.spi.AbstractEffectiveModelContextProvider;
 
-public class UpdateSchemaContext extends AbstractEffectiveModelContextProvider {
-    public UpdateSchemaContext(final EffectiveModelContext modelContext) {
-        super(modelContext);
+@NonNullByDefault
+public record UpdateSchemaContext(EffectiveModelContext modelContext) {
+    public UpdateSchemaContext {
+        requireNonNull(modelContext);
     }
 }
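
UpdateSchemaContext becomes a record whose compact canonical constructor rejects null before the implicit component assignment runs. The stand-alone sketch below shows the same construct on an invented Wrapper record, since exercising the real message would require an EffectiveModelContext instance.

    import static java.util.Objects.requireNonNull;

    // Stand-alone analogue of the record conversion above: the compact constructor's
    // checks run before the implicit component assignments.
    public record Wrapper(String value) {
        public Wrapper {
            requireNonNull(value);
        }

        public static void main(final String[] args) {
            System.out.println(new Wrapper("ok").value());   // prints "ok"
            // new Wrapper(null) throws NullPointerException from the compact constructor
        }
    }
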
index 687905d7225991b4bf266987e93bed153368485c..9548a7ebdc7d4432c6e162915f8e353c04dc4477 100644 (file)
@@ -20,6 +20,7 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVer
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -38,10 +39,10 @@ public abstract class VersionedExternalizableMessage implements Externalizable,
     }
 
     protected final @NonNull NormalizedNodeStreamVersion getStreamVersion() {
-        if (version >= DataStoreVersions.PHOSPHORUS_VERSION) {
+        if (version >= DataStoreVersions.POTASSIUM_VERSION) {
+            return NormalizedNodeStreamVersion.POTASSIUM;
+        } else if (version >= DataStoreVersions.PHOSPHORUS_VERSION) {
             return NormalizedNodeStreamVersion.MAGNESIUM;
-        } else if (version == DataStoreVersions.SODIUM_SR1_VERSION) {
-            return NormalizedNodeStreamVersion.SODIUM_SR1;
         } else {
             throw new IllegalStateException("Unsupported version " + version);
         }
index 33bd4d45e191419d605153463eafd2e00987830e..3eec9a4a46469ad17a86e9fbe281bbebcccf0ebe 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -13,7 +12,10 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 /**
  * Base class to be used for all simple modifications that can be applied to a DOMStoreTransaction.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractModification implements Modification {
+    @java.io.Serial
+    private static final long serialVersionUID = 2647778426312509718L;
 
     private YangInstanceIdentifier path;
     private short version;
index b59132fe874471eb7689459788849368ef31190a..3705707de296aac34104683243d81bcc1b0d39ba 100644 (file)
@@ -16,6 +16,7 @@ import java.util.List;
  * A CompositeModification gets stored in the transaction log for a Shard. During recovery when the transaction log
  * is being replayed a DOMStoreWriteTransaction could be created and a CompositeModification could be applied to it.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public interface CompositeModification extends Modification {
     /**
      * Get a list of modifications contained by this composite.
index 38517aa04b8d296a8393e6c36045e8fba21ba554..267dfa8368255955ea4a549f73103259d4d990e2 100644 (file)
@@ -14,14 +14,16 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * DeleteModification store all the parameters required to delete a path from the data tree.
  */
-public class DeleteModification extends AbstractModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class DeleteModification extends AbstractModification {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DeleteModification() {
index 098a89521a722caaa9efc2012898727618412d70..9244a38c80e92b3014b5376ce3a1e4684d28bac3 100644 (file)
@@ -13,13 +13,14 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * MergeModification stores all the parameters required to merge data into the specified path.
  */
-public class MergeModification extends WriteModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class MergeModification extends WriteModification {
     private static final long serialVersionUID = 1L;
 
     public MergeModification() {
index 59ca8eb3671370fecf91fa572f2c5b68c56dc851..e7757f36fcdbe214d5a8a793e0057d878c6a03e3 100644 (file)
@@ -12,8 +12,8 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Represents a modification to the data store.
index a9ffe9b1ba877c40718a946df5ecf6bf591d4ca2..26e049089ed32782c9f35f600cdc1bf70cff509d 100644 (file)
@@ -20,14 +20,15 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * MutableCompositeModification is just a mutable version of a CompositeModification.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MutableCompositeModification extends VersionedExternalizableMessage implements CompositeModification {
     private static final long serialVersionUID = 1L;
 
index 9e00d4b174dd1f9e28553e8561b03362303345b6..dc2d3fff4310564f3cf38e8a0b5f91b34786a1a3 100644 (file)
@@ -16,13 +16,14 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * WriteModification stores all the parameters required to write data to the specified path.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class WriteModification extends AbstractModification {
     private static final long serialVersionUID = 1L;
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java
new file mode 100644 (file)
index 0000000..8002815
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link AbortTransactionPayload}.
+ */
+final class AT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AT() {
+        // For Externalizable
+    }
+
+    AT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new AbortTransactionPayload(identifier, bytes);
+    }
+}
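
AT is the first of several single-purpose serialization proxies added by this change (CH, CT, DH, DS, DSS and DT follow the same shape). The underlying mechanism is the standard writeReplace()/readResolve() proxy pattern; the sketch below uses invented ExamplePayload/ExampleProxy names and plain Serializable instead of the Externalizable, length-prefixed encoding used here, purely to make the round trip visible.

    import java.io.Serializable;

    // Minimal sketch of the writeReplace()/readResolve() proxy pattern; names are invented.
    final class ExamplePayload implements Serializable {
        private static final long serialVersionUID = 1L;

        private final byte[] bytes;

        ExamplePayload(final byte[] bytes) {
            this.bytes = bytes.clone();
        }

        // Java serialization never writes ExamplePayload itself, only its proxy ...
        private Object writeReplace() {
            return new ExampleProxy(bytes);
        }

        private static final class ExampleProxy implements Serializable {
            private static final long serialVersionUID = 1L;

            private final byte[] bytes;

            ExampleProxy(final byte[] bytes) {
                this.bytes = bytes;
            }

            // ... and deserialization resolves the proxy back into the real payload.
            private Object readResolve() {
                return new ExamplePayload(bytes);
            }
        }
    }
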
index 0e34756cede02dfef04496f4f2e1f4066391b220..3c765be6152284aa8f50b17a1d3b3a223fa4fe06 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class AbortTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
-    private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
-            return TransactionIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected AbortTransactionPayload createObject(final TransactionIdentifier identifier,
-                final byte[] serialized) {
-            return new AbortTransactionPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(AbortTransactionPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(AT::new);
 
     AbortTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
         super(transactionId, serialized);
@@ -62,13 +37,18 @@ public final class AbortTransactionPayload extends AbstractIdentifiablePayload<T
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", transactionId, e);
-            throw new RuntimeException("Failed to serialized " + transactionId, e);
+            throw new IllegalStateException("Failed to serialize " + transactionId, e);
         }
         return new AbortTransactionPayload(transactionId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected AT externalizableProxy(final byte[] serialized) {
+        return new AT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
index 453bbbb21aab5b76ad9144abdd0531664d3c21c3..9d1bb9a3464c31aabc0e3191dc9060a14daf62f8 100644 (file)
@@ -10,55 +10,54 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static java.util.Objects.requireNonNull;
 
 import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
- * Abstract base class for our internal implementation of {@link DataTreeCandidateNode},
- * which we instantiate from a serialized stream. We do not retain the before-image and
- * do not implement {@link #getModifiedChild(PathArgument)}, as that method is only
- * useful for end users. Instances based on this class should never be leaked outside of
- * this component.
+ * Abstract base class for our internal implementation of {@link DataTreeCandidateNode}, which we instantiate from a
+ * serialized stream. We do not retain the before-image and do not implement {@link #modifiedChild(PathArgument)}, as
+ * that method is only useful for end users. Instances based on this class should never be leaked outside of this
+ * component.
  */
 abstract class AbstractDataTreeCandidateNode implements DataTreeCandidateNode {
-    private final ModificationType type;
+    private final @NonNull ModificationType type;
 
     protected AbstractDataTreeCandidateNode(final ModificationType type) {
         this.type = requireNonNull(type);
     }
 
     @Override
-    public final Optional<DataTreeCandidateNode> getModifiedChild(final PathArgument identifier) {
+    public final DataTreeCandidateNode modifiedChild(final PathArgument identifier) {
         throw new UnsupportedOperationException("Not implemented");
     }
 
     @Override
-    public final ModificationType getModificationType() {
+    public final ModificationType modificationType() {
         return type;
     }
 
     @Override
-    public final Optional<NormalizedNode> getDataBefore() {
+    public final NormalizedNode dataBefore() {
         throw new UnsupportedOperationException("Before-image not available after serialization");
     }
 
     static DataTreeCandidateNode createUnmodified() {
         return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
 
             @Override
-            public Optional<NormalizedNode> getDataAfter() {
+            public NormalizedNode dataAfter() {
                 throw new UnsupportedOperationException("After-image not available after serialization");
             }
 
             @Override
-            public Collection<DataTreeCandidateNode> getChildNodes() {
+            public Collection<DataTreeCandidateNode> childNodes() {
                 throw new UnsupportedOperationException("Children not available after serialization");
             }
         };
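
The hunk above tracks the yangtools rename of DataTreeCandidateNode's Optional-returning getters to direct accessors (getIdentifier() to name(), getModificationType() to modificationType(), getChildNodes() to childNodes(), and so on). A hypothetical caller-side sketch, assuming the yangtools release this patch targets:

    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;

    // Hypothetical caller, invented purely to illustrate the accessor renames.
    final class CandidateInspector {
        void inspect(final DataTreeCandidateNode node) {
            // Before: node.getIdentifier(), node.getModificationType(), node.getChildNodes()
            System.out.println(node.name() + " -> " + node.modificationType());
            for (var child : node.childNodes()) {
                inspect(child);
            }
        }
    }

Note that the serialized-stream implementation above deliberately throws from name(), dataAfter() and childNodes() on its unmodified root node, so a sketch like this only applies to fully materialized candidates.
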
index de9b0bba8a3acd6668fd8c0c54b7edb4d9998092..885b6c5336dc4999075f33b01bf4e19bfbe1af4a 100644 (file)
@@ -10,64 +10,78 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.io.ByteStreams;
-import java.io.DataInput;
+import com.google.common.base.MoreObjects;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.io.Serializable;
+import java.util.function.Function;
+import org.apache.commons.lang3.SerializationUtils;
 import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.yangtools.concepts.Identifier;
 
 /**
- * Abstract base class for {@link Payload}s which hold a single {@link Identifier}.
- *
- * @author Robert Varga
+ * Abstract base class for {@link IdentifiablePayload}s which hold a single {@link Identifier}.
  */
-public abstract class AbstractIdentifiablePayload<T extends Identifier> extends IdentifiablePayload<T>
-        implements Serializable {
-    protected abstract static class AbstractProxy<T extends Identifier> implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private byte[] serialized;
-        private T identifier;
-
-        public AbstractProxy() {
-            // For Externalizable
-        }
-
-        protected AbstractProxy(final byte[] serialized) {
-            this.serialized = requireNonNull(serialized);
-        }
-
+public abstract class AbstractIdentifiablePayload<T extends Identifier> extends IdentifiablePayload<T> {
+    /**
+     * An {@link Externalizable} with default implementations we expect our implementations to comply with. On-wire
+     * serialization format is defined by {@link #bytes()}.
+     */
+    protected interface SerialForm extends Externalizable {
+        /**
+         * Return the serial form of this object contents, corresponding to
+         * {@link AbstractIdentifiablePayload#serialized}.
+         *
+         * @return Serialized form
+         */
+        byte[] bytes();
+
+        /**
+         * Resolve this proxy to an actual {@link AbstractIdentifiablePayload}.
+         *
+         * @return A payload.
+         */
+        @java.io.Serial
+        Object readResolve();
+
+        /**
+         * Restore state from specified serialized form.
+         *
+         * @param newBytes Serialized form, as returned by {@link #bytes()}
+         * @throws IOException when a deserialization problem occurs
+         */
+        void readExternal(byte[] newBytes) throws IOException;
+
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * The default implementation is canonical and should never be overridden.
+         */
         @Override
-        public final void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
+        default void readExternal(final ObjectInput in) throws IOException {
+            final var bytes = new byte[in.readInt()];
+            in.readFully(bytes);
+            readExternal(bytes);
         }
 
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * The default implementation is canonical and should never be overridden.
+         */
         @Override
-        public final void readExternal(final ObjectInput in) throws IOException {
-            final int length = in.readInt();
-            serialized = new byte[length];
-            in.readFully(serialized);
-            identifier = verifyNotNull(readIdentifier(ByteStreams.newDataInput(serialized)));
-        }
-
-        protected final Object readResolve() {
-            return verifyNotNull(createObject(identifier, serialized));
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            final var bytes = bytes();
+            out.writeInt(bytes.length);
+            out.write(bytes);
         }
-
-        protected abstract @NonNull T readIdentifier(@NonNull DataInput in) throws IOException;
-
-        @SuppressWarnings("checkstyle:hiddenField")
-        protected abstract @NonNull Identifiable<T> createObject(@NonNull T identifier, byte @NonNull[] serialized);
     }
 
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final byte @NonNull [] serialized;
@@ -88,10 +102,34 @@ public abstract class AbstractIdentifiablePayload<T extends Identifier> extends
         return serialized.length;
     }
 
-    protected final Object writeReplace() {
+    protected final byte @NonNull [] serialized() {
+        return serialized;
+    }
+
+    @Override
+    public final int serializedSize() {
+        // TODO: this is not entirely accurate, as the serialization stream has additional overheads:
+        //       - 3 bytes for each block of data <256 bytes
+        //       - 5 bytes for each block of data >=256 bytes
+        //       - each block of data is limited to 1024 bytes as per serialization spec
+        return size() + externalizableProxySize();
+    }
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("identifier", identifier).add("size", size()).toString();
+    }
+
+    @Override
+    public final Object writeReplace() {
         return verifyNotNull(externalizableProxy(serialized));
     }
 
-    @SuppressWarnings("checkstyle:hiddenField")
-    protected abstract @NonNull AbstractProxy<T> externalizableProxy(byte @NonNull[] serialized);
+    protected abstract @NonNull SerialForm externalizableProxy(byte @NonNull[] serialized);
+
+    protected abstract int externalizableProxySize();
+
+    protected static final int externalizableProxySize(final Function<byte[], ? extends SerialForm> constructor) {
+        return SerializationUtils.serialize(constructor.apply(new byte[0])).length;
+    }
 }
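
The new SerialForm interface centralizes the Externalizable framing: writeExternal() emits a 4-byte length followed by the raw proxy bytes, the default readExternal(ObjectInput) reads that frame back and hands the array to the byte[]-taking overload, and externalizableProxySize() estimates the fixed per-proxy overhead by serializing a proxy wrapped around an empty array. The stand-alone demo below restates just the length-prefixed framing with plain JDK streams; it is illustrative and not project code.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Illustrative restatement of the SerialForm wire format: an int length, then raw bytes.
    final class FramingDemo {
        public static void main(final String[] args) throws IOException {
            final byte[] payload = { 1, 2, 3, 4 };

            // writeExternal() equivalent: length prefix followed by the payload
            final var buffer = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(buffer)) {
                out.writeInt(payload.length);
                out.write(payload);
            }

            // readExternal() equivalent: read the prefix, then exactly that many bytes
            try (var in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))) {
                final byte[] read = new byte[in.readInt()];
                in.readFully(read);
                System.out.println("read back " + read.length + " bytes");
            }
        }
    }
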
index 5e85434e4aa38bba477a3b64bc470a961be317f8..e87ce58a8b6b25574bfd11759df0468ecc3d30aa 100644 (file)
@@ -31,9 +31,9 @@ abstract class AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps
     static @NonNull ShardSnapshotState versionedDeserialize(final ObjectInput in) throws IOException {
         final PayloadVersion version = PayloadVersion.readFrom(in);
         switch (version) {
-            case SODIUM_SR1:
+            case CHLORINE_SR2:
                 return new ShardSnapshotState(readSnapshot(in), true);
-            case MAGNESIUM:
+            case POTASSIUM:
                 return new ShardSnapshotState(readSnapshot(in), false);
             case TEST_FUTURE_VERSION:
             case TEST_PAST_VERSION:
@@ -75,9 +75,9 @@ abstract class AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps
 
     private void versionedSerialize(final ObjectOutput out, final PayloadVersion version) throws IOException {
         switch (version) {
-            case SODIUM_SR1:
-            case MAGNESIUM:
-                // Sodium and Magnesium snapshots use Java Serialization, but differ in stream format
+            case CHLORINE_SR2:
+            case POTASSIUM:
+                // Sodium onwards snapshots use Java Serialization, but differ in stream format
                 out.writeObject(this);
                 return;
             case TEST_FUTURE_VERSION:
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java
new file mode 100644 (file)
index 0000000..a0af841
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CreateLocalHistoryPayload}.
+ */
+final class CH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CH() {
+        // For Externalizable
+    }
+
+    CH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new CreateLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java
new file mode 100644 (file)
index 0000000..2530d33
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.StreamCorruptedException;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Chunked;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Simple;
+import org.opendaylight.controller.cluster.io.ChunkedByteArray;
+
+/**
+ * Serialization proxy for {@link CommitTransactionPayload}.
+ */
+final class CT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private CommitTransactionPayload payload;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CT() {
+        // For Externalizable
+    }
+
+    CT(final CommitTransactionPayload payload) {
+        this.payload = requireNonNull(payload);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(payload.size());
+        payload.writeBytes(out);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final int length = in.readInt();
+        if (length < 0) {
+            throw new StreamCorruptedException("Invalid payload length " + length);
+        } else if (length < CommitTransactionPayload.MAX_ARRAY_SIZE) {
+            final byte[] serialized = new byte[length];
+            in.readFully(serialized);
+            payload = new Simple(serialized);
+        } else {
+            payload = new Chunked(ChunkedByteArray.readFrom(in, length, CommitTransactionPayload.MAX_ARRAY_SIZE));
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(payload);
+    }
+}
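
CT restores either a Simple or a Chunked payload depending on how the recorded length compares to CommitTransactionPayload.MAX_ARRAY_SIZE, which is read from a system property and rounded up to a power of two. The toy class below mirrors only that selection logic; the property name and the choose() helper are invented for illustration.

    import static com.google.common.math.IntMath.ceilingPowerOfTwo;

    // Toy illustration of the size-based representation choice made in CT.readExternal().
    final class RepresentationChoice {
        // Mirrors MAX_ARRAY_SIZE: a tunable, rounded up to the next power of two.
        private static final int MAX_ARRAY_SIZE =
            ceilingPowerOfTwo(Integer.getInteger("example.max-array-size", 256 * 1024));

        static String choose(final int length) {
            if (length < 0) {
                throw new IllegalArgumentException("Invalid payload length " + length);
            }
            return length < MAX_ARRAY_SIZE ? "Simple (single byte[])" : "Chunked (ChunkedByteArray)";
        }

        public static void main(final String[] args) {
            System.out.println(choose(1024));         // Simple
            System.out.println(choose(512 * 1024));   // Chunked
        }
    }
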
index 9acc113a422b9b962e122a5ad11c94c43c69be3b..9d6f526616156da4d3def73f9905ac1a7496419c 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected CloseLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new CloseLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(CloseLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(DH::new);
 
     CloseLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -62,13 +37,18 @@ public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload<
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new CloseLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected DH externalizableProxy(final byte[] serialized) {
+        return new DH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
index 5337530ece249128e078b7a88fa4a0b071c7cb1b..45cbcc851a80ead2bf0508d024a61f2cd6cc3a09 100644 (file)
@@ -7,35 +7,31 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import static com.google.common.base.Verify.verifyNotNull;
 import static com.google.common.math.IntMath.ceilingPowerOfTwo;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
 import com.google.common.io.ByteStreams;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.io.StreamCorruptedException;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.Map.Entry;
+import org.apache.commons.lang3.SerializationUtils;
 import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
 import org.opendaylight.controller.cluster.io.ChunkedByteArray;
 import org.opendaylight.controller.cluster.io.ChunkedOutputStream;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.yangtools.concepts.Either;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,30 +42,42 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 @Beta
-public abstract class CommitTransactionPayload extends IdentifiablePayload<TransactionIdentifier>
+public abstract sealed class CommitTransactionPayload extends IdentifiablePayload<TransactionIdentifier>
         implements Serializable {
+    @NonNullByDefault
+    public record CandidateTransaction(
+            TransactionIdentifier transactionId,
+            DataTreeCandidate candidate,
+            NormalizedNodeStreamVersion streamVersion) {
+        public CandidateTransaction {
+            requireNonNull(transactionId);
+            requireNonNull(candidate);
+            requireNonNull(streamVersion);
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(CommitTransactionPayload.class);
     private static final long serialVersionUID = 1L;
 
-    private static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
+    static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
         "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024));
 
-    private volatile Entry<TransactionIdentifier, DataTreeCandidateWithVersion> candidate = null;
-
-    CommitTransactionPayload() {
+    private volatile CandidateTransaction candidate = null;
 
+    private CommitTransactionPayload() {
+        // hidden on purpose
     }
 
     public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
             final DataTreeCandidate candidate, final PayloadVersion version, final int initialSerializedBufferCapacity)
                     throws IOException {
-        final ChunkedOutputStream cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE);
-        try (DataOutputStream dos = new DataOutputStream(cos)) {
+        final var cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE);
+        try (var dos = new DataOutputStream(cos)) {
             transactionId.writeTo(dos);
             DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, version, candidate);
         }
 
-        final Either<byte[], ChunkedByteArray> source = cos.toVariant();
+        final var source = cos.toVariant();
         LOG.debug("Initial buffer capacity {}, actual serialized size {}", initialSerializedBufferCapacity, cos.size());
         return source.isFirst() ? new Simple(source.getFirst()) : new Chunked(source.getSecond());
     }
@@ -86,8 +94,8 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload<Trans
         return create(transactionId, candidate, PayloadVersion.current());
     }
 
-    public @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate() throws IOException {
-        Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = candidate;
+    public @NonNull CandidateTransaction getCandidate() throws IOException {
+        var localCandidate = candidate;
         if (localCandidate == null) {
             synchronized (this) {
                 localCandidate = candidate;
@@ -99,42 +107,61 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload<Trans
         return localCandidate;
     }
 
-    public final @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate(
-            final ReusableStreamReceiver receiver) throws IOException {
-        final DataInput in = newDataInput();
-        return new SimpleImmutableEntry<>(TransactionIdentifier.readFrom(in),
-                DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver));
+    public final @NonNull CandidateTransaction getCandidate(final ReusableStreamReceiver receiver) throws IOException {
+        final var in = newDataInput();
+        final var transactionId = TransactionIdentifier.readFrom(in);
+        final var readCandidate = DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver);
+
+        return new CandidateTransaction(transactionId, readCandidate.candidate(), readCandidate.version());
     }
 
     @Override
     public TransactionIdentifier getIdentifier() {
         try  {
-            return getCandidate().getKey();
+            return getCandidate().transactionId();
         } catch (IOException e) {
             throw new IllegalStateException("Candidate deserialization failed.", e);
         }
     }
 
+    @Override
+    public final int serializedSize() {
+        // TODO: this is not entirely accurate as the byte[] can be chunked by the serialization stream
+        return ProxySizeHolder.PROXY_SIZE + size();
+    }
+
     /**
      * The cached candidate needs to be cleared after it is done applying to the DataTree, otherwise it would keep
      * the deserialized data in memory even though it is no longer needed, leading to wasted memory. This lets the
      * payload know that this was the last time the candidate was needed and it is safe to be cleared.
      */
-    public Entry<TransactionIdentifier, DataTreeCandidateWithVersion> acquireCandidate() throws IOException {
-        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = getCandidate();
+    public @NonNull CandidateTransaction acquireCandidate() throws IOException {
+        final var localCandidate = getCandidate();
         candidate = null;
         return localCandidate;
     }
 
+    @Override
+    public final String toString() {
+        final var helper = MoreObjects.toStringHelper(this);
+        final var localCandidate = candidate;
+        if (localCandidate != null) {
+            helper.add("identifier", localCandidate.transactionId());
+        }
+        return helper.add("size", size()).toString();
+    }
+
     abstract void writeBytes(ObjectOutput out) throws IOException;
 
     abstract DataInput newDataInput();
 
-    final Object writeReplace() {
-        return new Proxy(this);
+    @Override
+    public final Object writeReplace() {
+        return new CT(this);
     }
 
-    private static final class Simple extends CommitTransactionPayload {
+    static final class Simple extends CommitTransactionPayload {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private final byte[] serialized;
@@ -159,7 +186,8 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload<Trans
         }
     }
 
-    private static final class Chunked extends CommitTransactionPayload {
+    static final class Chunked extends CommitTransactionPayload {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via serialization proxy")
@@ -185,44 +213,12 @@ public abstract class CommitTransactionPayload extends IdentifiablePayload<Trans
         }
     }
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private CommitTransactionPayload payload;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final CommitTransactionPayload payload) {
-            this.payload = requireNonNull(payload);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(payload.size());
-            payload.writeBytes(out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            final int length = in.readInt();
-            if (length < 0) {
-                throw new StreamCorruptedException("Invalid payload length " + length);
-            } else if (length < MAX_ARRAY_SIZE) {
-                final byte[] serialized = new byte[length];
-                in.readFully(serialized);
-                payload = new Simple(serialized);
-            } else {
-                payload = new Chunked(ChunkedByteArray.readFrom(in, length, MAX_ARRAY_SIZE));
-            }
-        }
+    // Exists to break initialization dependency between CommitTransactionPayload/Simple/CT
+    private static final class ProxySizeHolder {
+        static final int PROXY_SIZE = SerializationUtils.serialize(new CT(new Simple(new byte[0]))).length;
 
-        private Object readResolve() {
-            return verifyNotNull(payload);
+        private ProxySizeHolder() {
+            // Hidden on purpose
         }
     }
 }
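
getCandidate() caches the deserialized CandidateTransaction in a volatile field using double-checked locking, acquireCandidate() drops that cache once the candidate has been applied, and ProxySizeHolder is the usual lazy-holder idiom used to break the initialization cycle with CT and Simple. The generic sketch below reproduces only the caching shape, with invented names and a pluggable decode step; it is not the project's code.

    import java.util.function.Supplier;

    // Generic double-checked-locking cache in the same shape as getCandidate()/acquireCandidate().
    final class DecodedCache<T> {
        private final Supplier<T> decoder;
        private volatile T cached;

        DecodedCache(final Supplier<T> decoder) {
            this.decoder = decoder;
        }

        T get() {
            T local = cached;
            if (local == null) {
                synchronized (this) {
                    local = cached;
                    if (local == null) {
                        cached = local = decoder.get();   // decode once, publish via the volatile write
                    }
                }
            }
            return local;
        }

        T acquire() {
            final T local = get();
            cached = null;   // last consumer: let the decoded form become garbage-collectible
            return local;
        }
    }
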
index dbf72f38d8de0f0016a4651c3ef46fcebe5e4760..928503a9fc78ce7f62cfd71a1b903800e56cc1a9 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class CreateLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected CreateLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new CreateLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(CreateLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(CH::new);
 
     CreateLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -62,13 +37,18 @@ public final class CreateLocalHistoryPayload extends AbstractIdentifiablePayload
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new CreateLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected CH externalizableProxy(final byte[] serialized) {
+        return new CH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java
new file mode 100644 (file)
index 0000000..4ce29b1
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CloseLocalHistoryPayload}.
+ */
+final class DH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DH() {
+        // For Externalizable
+    }
+
+    DH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new CloseLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java
new file mode 100644 (file)
index 0000000..091eeed
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+
+/**
+ * Serialization proxy for {@link DatastoreSnapshot}.
+ */
+final class DS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DatastoreSnapshot datastoreSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DS() {
+        // For Externalizable
+    }
+
+    DS(final DatastoreSnapshot datastoreSnapshot) {
+        this.datastoreSnapshot = requireNonNull(datastoreSnapshot);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final var type = (String) in.readObject();
+        final var snapshot = (ShardManagerSnapshot) in.readObject();
+
+        final int size = in.readInt();
+        var localShardSnapshots = new ArrayList<ShardSnapshot>(size);
+        for (int i = 0; i < size; i++) {
+            localShardSnapshots.add((ShardSnapshot) in.readObject());
+        }
+
+        datastoreSnapshot = new DatastoreSnapshot(type, snapshot, localShardSnapshots);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(datastoreSnapshot.getType());
+        out.writeObject(datastoreSnapshot.getShardManagerSnapshot());
+
+        final var shardSnapshots = datastoreSnapshot.getShardSnapshots();
+        out.writeInt(shardSnapshots.size());
+        for (var shardSnapshot : shardSnapshots) {
+            out.writeObject(shardSnapshot);
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(datastoreSnapshot);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java
new file mode 100644 (file)
index 0000000..9edb090
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+
+/**
+ * Serialization proxy for {@link DatastoreSnapshot.ShardSnapshot}.
+ */
+final class DSS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardSnapshot shardSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DSS() {
+        // For Externalizable
+    }
+
+    DSS(final ShardSnapshot shardSnapshot) {
+        this.shardSnapshot = requireNonNull(shardSnapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(shardSnapshot.getName());
+        out.writeObject(shardSnapshot.getSnapshot());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        shardSnapshot = new ShardSnapshot((String) in.readObject(), (Snapshot) in.readObject());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(shardSnapshot);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java
new file mode 100644 (file)
index 0000000..cc1a948
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link DisableTrackingPayload}.
+ */
+final class DT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ClientIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DT() {
+        // For Externalizable
+    }
+
+    DT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(ClientIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new DisableTrackingPayload(identifier, bytes);
+    }
+}
index ee829e8a13ce27644d5951d98f0e393691c58f6c..a5a76e06af8af2c9eda5daf6d40b9d9eecaa851c 100644 (file)
@@ -11,36 +11,41 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import org.eclipse.jdt.annotation.NonNullByDefault;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Utility serialization/deserialization for {@link DataTreeCandidate}. Note that this utility does not maintain
  * before-image information across serialization.
- *
- * @author Robert Varga
  */
 @Beta
 public final class DataTreeCandidateInputOutput {
+    public record DataTreeCandidateWithVersion(
+            @NonNull DataTreeCandidate candidate,
+            @NonNull NormalizedNodeStreamVersion version) implements Immutable {
+        public DataTreeCandidateWithVersion {
+            requireNonNull(candidate);
+            requireNonNull(version);
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCandidateInputOutput.class);
     private static final byte DELETE = 0;
     private static final byte SUBTREE_MODIFIED = 1;
@@ -55,26 +60,26 @@ public final class DataTreeCandidateInputOutput {
 
     private static DataTreeCandidateNode readModifiedNode(final ModificationType type, final NormalizedNodeDataInput in,
             final ReusableStreamReceiver receiver) throws IOException {
-        final PathArgument identifier = in.readPathArgument();
-        final Collection<DataTreeCandidateNode> children = readChildren(in, receiver);
+        final var pathArg = in.readPathArgument();
+        final var children = readChildren(in, receiver);
         if (children.isEmpty()) {
-            LOG.debug("Modified node {} does not have any children, not instantiating it", identifier);
+            LOG.debug("Modified node {} does not have any children, not instantiating it", pathArg);
             return null;
         }
 
-        return ModifiedDataTreeCandidateNode.create(identifier, type, children);
+        return ModifiedDataTreeCandidateNode.create(pathArg, type, children);
     }
 
-    private static Collection<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in,
+    private static List<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in,
             final ReusableStreamReceiver receiver) throws IOException {
         final int size = in.readInt();
         if (size == 0) {
-            return ImmutableList.of();
+            return List.of();
         }
 
-        final Collection<DataTreeCandidateNode> ret = new ArrayList<>(size);
+        final var ret = new ArrayList<DataTreeCandidateNode>(size);
         for (int i = 0; i < size; ++i) {
-            final DataTreeCandidateNode child = readNode(in, receiver);
+            final var child = readNode(in, receiver);
             if (child != null) {
                 ret.add(child);
             }
@@ -85,77 +90,35 @@ public final class DataTreeCandidateInputOutput {
     private static DataTreeCandidateNode readNode(final NormalizedNodeDataInput in,
             final ReusableStreamReceiver receiver) throws IOException {
         final byte type = in.readByte();
-        switch (type) {
-            case APPEARED:
-                return readModifiedNode(ModificationType.APPEARED, in, receiver);
-            case DELETE:
-                return DeletedDataTreeCandidateNode.create(in.readPathArgument());
-            case DISAPPEARED:
-                return readModifiedNode(ModificationType.DISAPPEARED, in, receiver);
-            case SUBTREE_MODIFIED:
-                return readModifiedNode(ModificationType.SUBTREE_MODIFIED, in, receiver);
-            case UNMODIFIED:
-                return null;
-            case WRITE:
-                return DataTreeCandidateNodes.written(in.readNormalizedNode(receiver));
-            default:
-                throw new IllegalArgumentException("Unhandled node type " + type);
-        }
-    }
-
-    @NonNullByDefault
-    public static final class DataTreeCandidateWithVersion implements Immutable {
-        private final DataTreeCandidate candidate;
-        private final NormalizedNodeStreamVersion version;
-
-        public DataTreeCandidateWithVersion(final DataTreeCandidate candidate,
-                final NormalizedNodeStreamVersion version) {
-            this.candidate = requireNonNull(candidate);
-            this.version = requireNonNull(version);
-        }
-
-        public DataTreeCandidate getCandidate() {
-            return candidate;
-        }
-
-        public NormalizedNodeStreamVersion getVersion() {
-            return version;
-        }
+        return switch (type) {
+            case APPEARED -> readModifiedNode(ModificationType.APPEARED, in, receiver);
+            case DELETE -> DeletedDataTreeCandidateNode.create(in.readPathArgument());
+            case DISAPPEARED -> readModifiedNode(ModificationType.DISAPPEARED, in, receiver);
+            case SUBTREE_MODIFIED -> readModifiedNode(ModificationType.SUBTREE_MODIFIED, in, receiver);
+            case UNMODIFIED -> null;
+            case WRITE -> DataTreeCandidateNodes.written(in.readNormalizedNode(receiver));
+            default -> throw new IllegalArgumentException("Unhandled node type " + type);
+        };
     }
 
     public static DataTreeCandidateWithVersion readDataTreeCandidate(final DataInput in,
             final ReusableStreamReceiver receiver) throws IOException {
-        final NormalizedNodeDataInput reader = NormalizedNodeDataInput.newDataInput(in);
-        final YangInstanceIdentifier rootPath = reader.readYangInstanceIdentifier();
+        final var reader = NormalizedNodeDataInput.newDataInput(in);
+        final var rootPath = reader.readYangInstanceIdentifier();
         final byte type = reader.readByte();
 
-        final DataTreeCandidateNode rootNode;
-        switch (type) {
-            case APPEARED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED,
-                    readChildren(reader, receiver));
-                break;
-            case DELETE:
-                rootNode = DeletedDataTreeCandidateNode.create();
-                break;
-            case DISAPPEARED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED,
-                    readChildren(reader, receiver));
-                break;
-            case SUBTREE_MODIFIED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
-                        readChildren(reader, receiver));
-                break;
-            case WRITE:
-                rootNode = DataTreeCandidateNodes.written(reader.readNormalizedNode(receiver));
-                break;
-            case UNMODIFIED:
-                rootNode = AbstractDataTreeCandidateNode.createUnmodified();
-                break;
-            default:
-                throw new IllegalArgumentException("Unhandled node type " + type);
-        }
-
+        final DataTreeCandidateNode rootNode = switch (type) {
+            case APPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED,
+                readChildren(reader, receiver));
+            case DELETE -> DeletedDataTreeCandidateNode.create();
+            case DISAPPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED,
+                readChildren(reader, receiver));
+            case SUBTREE_MODIFIED -> ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
+                readChildren(reader, receiver));
+            case WRITE -> DataTreeCandidateNodes.written(reader.readNormalizedNode(receiver));
+            case UNMODIFIED -> AbstractDataTreeCandidateNode.createUnmodified();
+            default -> throw new IllegalArgumentException("Unhandled node type " + type);
+        };
         return new DataTreeCandidateWithVersion(DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode),
             reader.getVersion());
     }
@@ -163,77 +126,69 @@ public final class DataTreeCandidateInputOutput {
     private static void writeChildren(final NormalizedNodeDataOutput out,
             final Collection<DataTreeCandidateNode> children) throws IOException {
         out.writeInt(children.size());
-        for (DataTreeCandidateNode child : children) {
+        for (var child : children) {
             writeNode(out, child);
         }
     }
 
     private static void writeNode(final NormalizedNodeDataOutput out, final DataTreeCandidateNode node)
             throws IOException {
-        switch (node.getModificationType()) {
-            case APPEARED:
+        switch (node.modificationType()) {
+            case APPEARED -> {
                 out.writeByte(APPEARED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case DELETE:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case DELETE -> {
                 out.writeByte(DELETE);
-                out.writePathArgument(node.getIdentifier());
-                break;
-            case DISAPPEARED:
+                out.writePathArgument(node.name());
+            }
+            case DISAPPEARED -> {
                 out.writeByte(DISAPPEARED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case SUBTREE_MODIFIED:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case SUBTREE_MODIFIED -> {
                 out.writeByte(SUBTREE_MODIFIED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case WRITE:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case WRITE -> {
                 out.writeByte(WRITE);
-                out.writeNormalizedNode(node.getDataAfter().get());
-                break;
-            case UNMODIFIED:
-                out.writeByte(UNMODIFIED);
-                break;
-            default:
-                throwUnhandledNodeType(node);
+                out.writeNormalizedNode(node.getDataAfter());
+            }
+            case UNMODIFIED -> out.writeByte(UNMODIFIED);
+            default -> throwUnhandledNodeType(node);
         }
     }
 
     @VisibleForTesting
     public static void writeDataTreeCandidate(final DataOutput out, final PayloadVersion version,
             final DataTreeCandidate candidate) throws IOException {
-        try (NormalizedNodeDataOutput writer = version.getStreamVersion().newDataOutput(out)) {
+        try (var writer = version.getStreamVersion().newDataOutput(out)) {
             writer.writeYangInstanceIdentifier(candidate.getRootPath());
 
-            final DataTreeCandidateNode node = candidate.getRootNode();
-            switch (node.getModificationType()) {
-                case APPEARED:
+            final var node = candidate.getRootNode();
+            switch (node.modificationType()) {
+                case APPEARED -> {
                     writer.writeByte(APPEARED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case DELETE:
-                    writer.writeByte(DELETE);
-                    break;
-                case DISAPPEARED:
+                    writeChildren(writer, node.childNodes());
+                }
+                case DELETE -> writer.writeByte(DELETE);
+                case DISAPPEARED -> {
                     writer.writeByte(DISAPPEARED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case SUBTREE_MODIFIED:
+                    writeChildren(writer, node.childNodes());
+                }
+                case SUBTREE_MODIFIED -> {
                     writer.writeByte(SUBTREE_MODIFIED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case UNMODIFIED:
-                    writer.writeByte(UNMODIFIED);
-                    break;
-                case WRITE:
+                    writeChildren(writer, node.childNodes());
+                }
+                case UNMODIFIED -> writer.writeByte(UNMODIFIED);
+                case WRITE -> {
                     writer.writeByte(WRITE);
-                    writer.writeNormalizedNode(node.getDataAfter().get());
-                    break;
-                default:
-                    throwUnhandledNodeType(node);
+                    writer.writeNormalizedNode(node.getDataAfter());
+                }
+                default -> throwUnhandledNodeType(node);
             }
         }
     }
@@ -244,6 +199,6 @@ public final class DataTreeCandidateInputOutput {
     }
 
     private static void throwUnhandledNodeType(final DataTreeCandidateNode node) {
-        throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+        throw new IllegalArgumentException("Unhandled node type " + node.modificationType());
     }
 }
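
DataTreeCandidateWithVersion changes from a hand-written immutable class with getCandidate()/getVersion() into a compact record whose constructor performs the null checks. Below is a sketch of that shape and of the caller-side rename, with the yangtools types replaced by local stand-ins so it compiles on its own; the real record additionally implements Immutable and carries @NonNull annotations, as in the hunk above.

import static java.util.Objects.requireNonNull;

// Local stand-ins for the yangtools types, purely so this sketch is self-contained.
interface DataTreeCandidate { }

enum NormalizedNodeStreamVersion { POTASSIUM }

// Same shape as the new nested record: validation lives in the compact constructor,
// accessors are the generated candidate() and version() methods.
record DataTreeCandidateWithVersion(DataTreeCandidate candidate, NormalizedNodeStreamVersion version) {
    DataTreeCandidateWithVersion {
        requireNonNull(candidate);
        requireNonNull(version);
    }
}

final class RecordMigrationDemo {
    static void use(final DataTreeCandidateWithVersion dtc) {
        // Before this change: dtc.getCandidate() and dtc.getVersion().
        final DataTreeCandidate candidate = dtc.candidate();
        final NormalizedNodeStreamVersion version = dtc.version();
        System.out.println(candidate + " encoded with " + version);
    }

    private RecordMigrationDemo() {
        // utility class
    }
}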
index 6330e5d4c257343a135f62b6e9ac3a8dcd429923..9c0a3acc72d12c4a406ef4085aca068f5c9ee29b 100644 (file)
@@ -10,12 +10,7 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.collect.ImmutableList;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
@@ -26,61 +21,16 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  *
  * @author Thomas Pantelis
  */
-public class DatastoreSnapshot implements Serializable {
+public final class DatastoreSnapshot implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private DatastoreSnapshot datastoreSnapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final DatastoreSnapshot datastoreSnapshot) {
-            this.datastoreSnapshot = datastoreSnapshot;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(datastoreSnapshot.type);
-            out.writeObject(datastoreSnapshot.shardManagerSnapshot);
-
-            out.writeInt(datastoreSnapshot.shardSnapshots.size());
-            for (ShardSnapshot shardSnapshot: datastoreSnapshot.shardSnapshots) {
-                out.writeObject(shardSnapshot);
-            }
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            String localType = (String)in.readObject();
-            ShardManagerSnapshot localShardManagerSnapshot = (ShardManagerSnapshot) in.readObject();
-
-            int size = in.readInt();
-            List<ShardSnapshot> localShardSnapshots = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                localShardSnapshots.add((ShardSnapshot) in.readObject());
-            }
-
-            datastoreSnapshot = new DatastoreSnapshot(localType, localShardManagerSnapshot, localShardSnapshots);
-        }
-
-        private Object readResolve() {
-            return datastoreSnapshot;
-        }
-    }
-
-    private final String type;
+    private final @NonNull String type;
     private final ShardManagerSnapshot shardManagerSnapshot;
-    private final List<ShardSnapshot> shardSnapshots;
+    private final @NonNull ImmutableList<ShardSnapshot> shardSnapshots;
 
-    public DatastoreSnapshot(@NonNull String type, @Nullable ShardManagerSnapshot shardManagerSnapshot,
-            @NonNull List<ShardSnapshot> shardSnapshots) {
+    public DatastoreSnapshot(final @NonNull String type, final @Nullable ShardManagerSnapshot shardManagerSnapshot,
+            final @NonNull List<ShardSnapshot> shardSnapshots) {
         this.type = requireNonNull(type);
         this.shardManagerSnapshot = shardManagerSnapshot;
         this.shardSnapshots = ImmutableList.copyOf(shardSnapshots);
@@ -98,49 +48,19 @@ public class DatastoreSnapshot implements Serializable {
         return shardSnapshots;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new DS(this);
     }
 
-    public static class ShardSnapshot implements Serializable {
+    public static final class ShardSnapshot implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
-        private static final class Proxy implements Externalizable {
-            private static final long serialVersionUID = 1L;
-
-            private ShardSnapshot shardSnapshot;
-
-            // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-            // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-            @SuppressWarnings("checkstyle:RedundantModifier")
-            public Proxy() {
-                // For Externalizable
-            }
-
-            Proxy(final ShardSnapshot shardSnapshot) {
-                this.shardSnapshot = shardSnapshot;
-            }
-
-            @Override
-            public void writeExternal(ObjectOutput out) throws IOException {
-                out.writeObject(shardSnapshot.name);
-                out.writeObject(shardSnapshot.snapshot);
-            }
-
-            @Override
-            public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-                shardSnapshot = new ShardSnapshot((String)in.readObject(), (Snapshot) in.readObject());
-            }
-
-            private Object readResolve() {
-                return shardSnapshot;
-            }
-        }
-
-        private final String name;
-        private final Snapshot snapshot;
+        private final @NonNull String name;
+        private final @NonNull Snapshot snapshot;
 
-        public ShardSnapshot(@NonNull String name, @NonNull Snapshot snapshot) {
+        public ShardSnapshot(final @NonNull String name, final @NonNull Snapshot snapshot) {
             this.name = requireNonNull(name);
             this.snapshot = requireNonNull(snapshot);
         }
@@ -153,8 +73,9 @@ public class DatastoreSnapshot implements Serializable {
             return snapshot;
         }
 
+        @java.io.Serial
         private Object writeReplace() {
-            return new Proxy(this);
+            return new DSS(this);
         }
     }
 }
index de386f1757fa5bf16741c6609b215d38b3ce9873..54e9e98d517bb095eb066d5d09f2f45c73be3ea4 100644 (file)
@@ -8,11 +8,10 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import java.util.Collection;
-import java.util.Optional;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
  * A deserialized {@link DataTreeCandidateNode} which represents a deletion.
@@ -25,7 +24,7 @@ abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNod
     static DataTreeCandidateNode create() {
         return new DeletedDataTreeCandidateNode() {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
         };
@@ -34,21 +33,20 @@ abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNod
     static DataTreeCandidateNode create(final PathArgument identifier) {
         return new DeletedDataTreeCandidateNode() {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 return identifier;
             }
         };
     }
 
     @Override
-    public final Optional<NormalizedNode> getDataAfter() {
-        return Optional.empty();
+    public final NormalizedNode dataAfter() {
+        return null;
     }
 
     @Override
-    public final Collection<DataTreeCandidateNode> getChildNodes() {
-        // We would require the before-image to reconstruct the list of nodes which
-        // were deleted.
+    public final Collection<DataTreeCandidateNode> childNodes() {
+        // We would require the before-image to reconstruct the list of nodes which were deleted.
         throw new UnsupportedOperationException("Children not available after serialization");
     }
 }
index 29dd0725245e761a2551a2cf00bab41b6c6e35f8..293f396a948b3c2a81fadc11554149b6748de9c3 100644 (file)
@@ -9,37 +9,16 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class DisableTrackingPayload extends AbstractIdentifiablePayload<ClientIdentifier> {
-    private static final class Proxy extends AbstractProxy<ClientIdentifier> {
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected ClientIdentifier readIdentifier(final DataInput in) throws IOException {
-            return ClientIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected DisableTrackingPayload createObject(final ClientIdentifier identifier,
-                final byte[] serialized) {
-            return new DisableTrackingPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(DisableTrackingPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(DT::new);
 
     DisableTrackingPayload(final ClientIdentifier clientId, final byte[] serialized) {
         super(clientId, serialized);
@@ -53,13 +32,18 @@ public final class DisableTrackingPayload extends AbstractIdentifiablePayload<Cl
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", clientId, e);
-            throw new RuntimeException("Failed to serialize " + clientId, e);
+            throw new IllegalStateException("Failed to serialize " + clientId, e);
         }
         return new DisableTrackingPayload(clientId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected DT externalizableProxy(final byte[] serialized) {
+        return new DT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java
new file mode 100644 (file)
index 0000000..827a0cf
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.List;
+
+/**
+ * Externalizable proxy for {@link FrontendShardDataTreeSnapshotMetadata}.
+ */
+final class FM implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private List<FrontendClientMetadata> clients;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FM() {
+        // For Externalizable
+    }
+
+    FM(final FrontendShardDataTreeSnapshotMetadata metadata) {
+        clients = metadata.getClients();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(clients.size());
+        for (var c : clients) {
+            c.writeTo(out);
+        }
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final int size = in.readInt();
+        final var builder = ImmutableList.<FrontendClientMetadata>builderWithExpectedSize(size);
+        for (int i = 0; i < size; ++i) {
+            builder.add(FrontendClientMetadata.readFrom(in));
+        }
+        clients = builder.build();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new FrontendShardDataTreeSnapshotMetadata(clients);
+    }
+}
\ No newline at end of file
index 35859bb1c947a33497aed15c59023c810d845f89..49573e247c6615e9ff79ef736573a969c749713e 100644 (file)
@@ -18,21 +18,24 @@ import java.util.Collection;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
-import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.concepts.WritableObject;
 
-public final class FrontendClientMetadata implements Identifiable<ClientIdentifier>, WritableObject {
+public final class FrontendClientMetadata implements WritableObject {
     private final @NonNull ImmutableList<FrontendHistoryMetadata> currentHistories;
     private final @NonNull ImmutableUnsignedLongSet purgedHistories;
-    private final @NonNull ClientIdentifier identifier;
+    private final @NonNull ClientIdentifier clientId;
 
-    public FrontendClientMetadata(final ClientIdentifier identifier, final ImmutableUnsignedLongSet purgedHistories,
+    public FrontendClientMetadata(final ClientIdentifier clientId, final ImmutableUnsignedLongSet purgedHistories,
             final Collection<FrontendHistoryMetadata> currentHistories) {
-        this.identifier = requireNonNull(identifier);
+        this.clientId = requireNonNull(clientId);
         this.purgedHistories = requireNonNull(purgedHistories);
         this.currentHistories = ImmutableList.copyOf(currentHistories);
     }
 
+    public ClientIdentifier clientId() {
+        return clientId;
+    }
+
     public ImmutableList<FrontendHistoryMetadata> getCurrentHistories() {
         return currentHistories;
     }
@@ -41,14 +44,9 @@ public final class FrontendClientMetadata implements Identifiable<ClientIdentifi
         return purgedHistories;
     }
 
-    @Override
-    public ClientIdentifier getIdentifier() {
-        return identifier;
-    }
-
     @Override
     public void writeTo(final DataOutput out) throws IOException {
-        identifier.writeTo(out);
+        clientId.writeTo(out);
         purgedHistories.writeTo(out);
 
         out.writeInt(currentHistories.size());
@@ -58,7 +56,7 @@ public final class FrontendClientMetadata implements Identifiable<ClientIdentifi
     }
 
     public static FrontendClientMetadata readFrom(final DataInput in) throws IOException {
-        final ClientIdentifier id = ClientIdentifier.readFrom(in);
+        final var clientId = ClientIdentifier.readFrom(in);
         final var purgedHistories = ImmutableUnsignedLongSet.readFrom(in);
 
         final int currentSize = in.readInt();
@@ -67,12 +65,12 @@ public final class FrontendClientMetadata implements Identifiable<ClientIdentifi
             currentBuilder.add(FrontendHistoryMetadata.readFrom(in));
         }
 
-        return new FrontendClientMetadata(id, purgedHistories, currentBuilder.build());
+        return new FrontendClientMetadata(clientId, purgedHistories, currentBuilder.build());
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(FrontendClientMetadata.class).add("identifer", identifier)
-                .add("current", currentHistories).add("purged", purgedHistories).toString();
+        return MoreObjects.toStringHelper(FrontendClientMetadata.class)
+            .add("clientId", clientId).add("current", currentHistories).add("purged", purgedHistories).toString();
     }
 }
index b7b1261192aeebe9ba8e689d7cff8d4bd12ba4e5..1d28ccac45ddc7af3858a2086e8005e12c4d8682 100644 (file)
@@ -11,55 +11,12 @@ import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableList;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-public final class FrontendShardDataTreeSnapshotMetadata extends
-        ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
-
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private List<FrontendClientMetadata> clients;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final FrontendShardDataTreeSnapshotMetadata metadata) {
-            this.clients = metadata.getClients();
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(clients.size());
-            for (final FrontendClientMetadata c : clients) {
-                c.writeTo(out);
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            final int size = in.readInt();
-            final List<FrontendClientMetadata> readedClients = new ArrayList<>(size);
-            for (int i = 0; i < size ; ++i) {
-                readedClients.add(FrontendClientMetadata.readFrom(in));
-            }
-            this.clients = ImmutableList.copyOf(readedClients);
-        }
-
-        private Object readResolve() {
-            return new FrontendShardDataTreeSnapshotMetadata(clients);
-        }
-    }
-
+public final class FrontendShardDataTreeSnapshotMetadata
+        extends ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -77,7 +34,7 @@ public final class FrontendShardDataTreeSnapshotMetadata extends
 
     @Override
     protected Externalizable externalizableProxy() {
-        return new Proxy(this);
+        return new FM(this);
     }
 
     @Override
@@ -87,7 +44,8 @@ public final class FrontendShardDataTreeSnapshotMetadata extends
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class).add("clients", clients)
-                .toString();
+        return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class)
+            .add("clients", clients)
+            .toString();
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java
new file mode 100644 (file)
index 0000000..94cd695
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.google.common.collect.ImmutableMap;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Externalizable proxy for {@link MetadataShardDataTreeSnapshot}.
+ */
+final class MS implements Externalizable {
+    private static final Logger LOG = LoggerFactory.getLogger(MS.class);
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
+    private NormalizedNodeStreamVersion version;
+    private NormalizedNode rootNode;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MS() {
+        // For Externalizable
+    }
+
+    MS(final MetadataShardDataTreeSnapshot snapshot) {
+        rootNode = snapshot.getRootNode().orElseThrow();
+        metadata = snapshot.getMetadata();
+        version = snapshot.version().getStreamVersion();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(metadata.size());
+        for (var m : metadata.values()) {
+            out.writeObject(m);
+        }
+        try (var stream = version.newDataOutput(out)) {
+            stream.writeNormalizedNode(rootNode);
+        }
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final int metaSize = in.readInt();
+        checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
+
+        // Default pre-allocate is 4, which should be fine
+        final var metaBuilder = ImmutableMap
+            .<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>builder();
+        for (int i = 0; i < metaSize; ++i) {
+            final var m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
+            if (m != null) {
+                metaBuilder.put(m.getType(), m);
+            } else {
+                LOG.warn("Skipping null metadata");
+            }
+        }
+        metadata = metaBuilder.build();
+
+        final var stream = NormalizedNodeDataInput.newDataInput(in);
+        version = stream.getVersion();
+        rootNode = stream.readNormalizedNode();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new MetadataShardDataTreeSnapshot(rootNode, metadata);
+    }
+}
\ No newline at end of file
index a97f8f46f377f5fd1b8bbd6e05013bb5f5dc243e..f1a0d7c5540ced84fca4ac6a5b79949bf4e1f1f9 100644 (file)
@@ -7,27 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.io.StreamCorruptedException;
 import java.util.Map;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AbstractVersionedShardDataTreeSnapshot} which contains additional metadata.
@@ -37,72 +25,7 @@ import org.slf4j.LoggerFactory;
 @Beta
 public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardDataTreeSnapshot
         implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private static final Logger LOG = LoggerFactory.getLogger(MetadataShardDataTreeSnapshot.class);
-
-        private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
-        private NormalizedNodeStreamVersion version;
-        private NormalizedNode rootNode;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final MetadataShardDataTreeSnapshot snapshot) {
-            this.rootNode = snapshot.getRootNode().get();
-            this.metadata = snapshot.getMetadata();
-            this.version = snapshot.version().getStreamVersion();
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(metadata.size());
-            for (ShardDataTreeSnapshotMetadata<?> m : metadata.values()) {
-                out.writeObject(m);
-            }
-            out.writeBoolean(true);
-            try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
-                stream.writeNormalizedNode(rootNode);
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            final int metaSize = in.readInt();
-            checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
-
-            // Default pre-allocate is 4, which should be fine
-            final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>
-                    metaBuilder = ImmutableMap.builder();
-            for (int i = 0; i < metaSize; ++i) {
-                final ShardDataTreeSnapshotMetadata<?> m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
-                if (m != null) {
-                    metaBuilder.put(m.getType(), m);
-                } else {
-                    LOG.warn("Skipping null metadata");
-                }
-            }
-
-            metadata = metaBuilder.build();
-            final boolean present = in.readBoolean();
-            if (!present) {
-                throw new StreamCorruptedException("Unexpected missing root node");
-            }
-
-            final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
-            version = stream.getVersion();
-            rootNode = stream.readNormalizedNode();
-        }
-
-        private Object readResolve() {
-            return new MetadataShardDataTreeSnapshot(rootNode, metadata);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -134,11 +57,12 @@ public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardD
 
     @Override
     PayloadVersion version() {
-        return PayloadVersion.MAGNESIUM;
+        return PayloadVersion.POTASSIUM;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new MS(this);
     }
 
     @Override
index 061d9134294d9f98dd062db6b12b5547de3fbb43..8c771886f815e6adccfbab28cf892ff51afb1847 100644 (file)
@@ -10,18 +10,18 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static java.util.Objects.requireNonNull;
 
 import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
  * A deserialized {@link DataTreeCandidateNode} which represents a modification in
  * one of its children.
  */
 abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
-    private final Collection<DataTreeCandidateNode> children;
+    private final @NonNull Collection<DataTreeCandidateNode> children;
 
     private ModifiedDataTreeCandidateNode(final ModificationType type,
             final Collection<DataTreeCandidateNode> children) {
@@ -32,7 +32,7 @@ abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNo
     static DataTreeCandidateNode create(final ModificationType type, final Collection<DataTreeCandidateNode> children) {
         return new ModifiedDataTreeCandidateNode(type, children) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
         };
@@ -42,19 +42,19 @@ abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNo
             final Collection<DataTreeCandidateNode> children) {
         return new ModifiedDataTreeCandidateNode(type, children) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 return identifier;
             }
         };
     }
 
     @Override
-    public final Optional<NormalizedNode> getDataAfter() {
+    public final NormalizedNode dataAfter() {
         throw new UnsupportedOperationException("After-image not available after serialization");
     }
 
     @Override
-    public final Collection<DataTreeCandidateNode> getChildNodes() {
+    public final Collection<DataTreeCandidateNode> childNodes() {
         return children;
     }
 }
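
The last two hunks migrate the deserialized candidate nodes to the renamed DataTreeCandidateNode accessors used throughout this change: name() replaces getIdentifier(), modificationType() replaces getModificationType(), childNodes() replaces getChildNodes(), and dataAfter() returns a nullable node instead of an Optional. A small traversal sketch against those accessors follows; it assumes the org.opendaylight.yangtools.yang.data.tree.api package imported above and, like the deserialized nodes themselves, does not touch before-images.

import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;

// Recursive dump of a candidate subtree using the renamed accessors. Note that the
// deserialized root nodes in this package throw from name(), so start below the root.
final class CandidateNodeDump {
    static void dump(final DataTreeCandidateNode node, final int depth) {
        final var indent = "  ".repeat(depth);
        System.out.println(indent + node.modificationType() + " " + node.name());
        switch (node.modificationType()) {
            case APPEARED, DISAPPEARED, SUBTREE_MODIFIED -> {
                for (var child : node.childNodes()) {
                    dump(child, depth + 1);
                }
            }
            case WRITE -> System.out.println(indent + "  after-image: " + node.dataAfter());
            case DELETE, UNMODIFIED -> {
                // nothing further to show without a before-image
            }
        }
    }

    private CandidateNodeDump() {
        // utility class
    }
}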
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java
new file mode 100644 (file)
index 0000000..dc95e31
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeLocalHistoryPayload}.
+ */
+final class PH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PH() {
+        // For Externalizable
+    }
+
+    PH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new PurgeLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java
new file mode 100644 (file)
index 0000000..8ea773f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeTransactionPayload}.
+ */
+final class PT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PT() {
+        // For Externalizable
+    }
+
+    PT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new PurgeTransactionPayload(identifier, bytes);
+    }
+}
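
DT, PH and PT are three instances of the same pattern: each implements AbstractIdentifiablePayload.SerialForm, keeps the serialized byte[] plus the identifier parsed out of it, and resolves back to the concrete payload. The SerialForm interface itself is not part of this hunk; the sketch below is only a hypothetical reconstruction of a contract consistent with these three proxies, with default Externalizable methods ferrying the raw bytes so each proxy supplies just bytes(), readExternal(byte[]) and readResolve(). The real interface in the controller may differ in detail.

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

// Hypothetical reconstruction only, inferred from the DT/PH/PT proxies above; not the actual
// AbstractIdentifiablePayload.SerialForm source.
interface SerialFormSketch extends Externalizable {
    // The serialized payload body carried by this proxy.
    byte[] bytes();

    // Re-populate the proxy (and parse the identifier) from the bytes read off the stream.
    void readExternal(byte[] newBytes) throws IOException;

    // Resolve back to the concrete payload instance.
    Object readResolve();

    @Override
    default void writeExternal(final ObjectOutput out) throws IOException {
        final byte[] local = bytes();
        out.writeInt(local.length);
        out.write(local);
    }

    @Override
    default void readExternal(final ObjectInput in) throws IOException {
        final byte[] local = new byte[in.readInt()];
        in.readFully(local);
        readExternal(local);
    }
}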
index 7d51f4660c58057e3a456ceb4f502cc37a836822..298f835c5c85c1cc71db2bdb2b9b5834b6629b4a 100644 (file)
@@ -31,8 +31,6 @@ import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVer
  * participant instance should oppose RAFT candidates which produce persistence of an unsupported version. If a follower
  * encounters an unsupported version it must not become fully-operational, as it does not have an accurate view
  * of shard state.
- *
- * @author Robert Varga
  */
 @Beta
 public enum PayloadVersion implements WritableObject {
@@ -50,24 +48,27 @@ public enum PayloadVersion implements WritableObject {
     },
 
     /**
-     * ABI version as shipped in Sodium SR1 Simultaneous Release. QName-bearing messages are using
-     * {@link NormalizedNodeStreamVersion#SODIUM_SR1}, which improves encoding.
+     * ABI version shipped in the {@code 2022.09 Chlorine SR2} release. This version revises the serialization
+     * format of payload proxies to reduce their size. Otherwise this format is equivalent to {@code #MAGNESIUM}.
+     *
+     * @deprecated Use {@link #POTASSIUM} instead.
      */
-    SODIUM_SR1(7) {
+    @Deprecated(since = "8.0.0", forRemoval = true)
+    CHLORINE_SR2(9) {
         @Override
         public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.SODIUM_SR1;
+            return NormalizedNodeStreamVersion.MAGNESIUM;
         }
     },
 
     /**
-     * Revised payload version. Payloads remain the same as {@link #SODIUM_SR1}, but messages bearing QNames in any
-     * shape are using {@link NormalizedNodeStreamVersion#MAGNESIUM}, which improves encoding.
+     * ABI version shipped in the {@code 2023.09 Potassium} release. This version removes Augmentation identifiers
+     * and nodes. Otherwise this format is equivalent to {@link #CHLORINE_SR2}.
      */
-    MAGNESIUM(8) {
+    POTASSIUM(10) {
         @Override
         public NormalizedNodeStreamVersion getStreamVersion() {
-            return NormalizedNodeStreamVersion.MAGNESIUM;
+            return NormalizedNodeStreamVersion.POTASSIUM;
         }
     },
 
@@ -112,7 +113,7 @@ public enum PayloadVersion implements WritableObject {
      * @return Current {@link PayloadVersion}
      */
     public static @NonNull PayloadVersion current() {
-        return MAGNESIUM;
+        return POTASSIUM;
     }
 
     /**
@@ -126,22 +127,12 @@ public enum PayloadVersion implements WritableObject {
      */
     public static @NonNull PayloadVersion valueOf(final short version)
             throws FutureVersionException, PastVersionException {
-        switch (Short.toUnsignedInt(version)) {
-            case 0:
-            case 1:
-            case 2:
-            case 3:
-            case 4:
-            case 5:
-            case 6:
-                throw new PastVersionException(version, SODIUM_SR1);
-            case 7:
-                return SODIUM_SR1;
-            case 8:
-                return MAGNESIUM;
-            default:
-                throw new FutureVersionException(version, MAGNESIUM);
-        }
+        return switch (Short.toUnsignedInt(version)) {
+            case 0, 1, 2, 3, 4, 5, 6, 7, 8 -> throw new PastVersionException(version, CHLORINE_SR2);
+            case 9 -> CHLORINE_SR2;
+            case 10 -> POTASSIUM;
+            default -> throw new FutureVersionException(version, CHLORINE_SR2);
+        };
     }
 
     @Override
@@ -162,7 +153,7 @@ public enum PayloadVersion implements WritableObject {
         try {
             return valueOf(s);
         } catch (FutureVersionException | PastVersionException e) {
-            throw new IOException("Unsupported version", e);
+            throw new IOException(e);
         }
     }
 }
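
With the version constants reshuffled, the read/write path stays the WritableObject contract shown above. Here is a short round-trip sketch using Guava's ByteStreams; it assumes readFrom(DataInput) is the public static reader whose error handling appears at the end of this hunk.

import com.google.common.io.ByteStreams;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;

// Writes the current payload version and reads it back; an unknown or retired version
// value would surface from readFrom() as an IOException.
final class PayloadVersionDemo {
    public static void main(final String[] args) throws IOException {
        final var out = ByteStreams.newDataOutput();
        PayloadVersion.current().writeTo(out);

        final var in = ByteStreams.newDataInput(out.toByteArray());
        final var version = PayloadVersion.readFrom(in);
        System.out.println(version + " -> " + version.getStreamVersion());
    }
}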
index 8d9a8d217a67e201dda8c8f9ba9f705ac939557c..3608e7589fea0640616ba72039b98fc3624848aa 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -22,34 +21,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class PurgeLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected PurgeLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new PurgeLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(PurgeLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(PH::new);
 
     PurgeLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -63,13 +38,18 @@ public final class PurgeLocalHistoryPayload extends AbstractIdentifiablePayload<
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new PurgeLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected PH externalizableProxy(final byte[] serialized) {
+        return new PH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
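
The new PROXY_SIZE constant caches the result of externalizableProxySize(PH::new), i.e. the serialized size of an empty PH proxy, so it only has to be computed once per payload class. The actual helper lives in AbstractIdentifiablePayload and is not shown in this diff; the sketch below is only a guess at the general mechanism, measuring an empty Externalizable proxy with plain Java serialization. ProxySizeEstimator and EmptyProxy are made-up names.

// Hypothetical sketch of measuring the serialized size of an empty Externalizable proxy once,
// so it can be cached in a static constant such as PROXY_SIZE. Not the actual
// AbstractIdentifiablePayload.externalizableProxySize() implementation.
import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.UncheckedIOException;
import java.util.function.Supplier;

final class ProxySizeEstimator {
    private ProxySizeEstimator() {
        // utility class
    }

    static int serializedSize(final Supplier<? extends Externalizable> proxySupplier) {
        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            oos.writeObject(proxySupplier.get());
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        return bytes.size();
    }

    // Minimal proxy with no payload, standing in for PH/PT/ST-style proxies.
    static final class EmptyProxy implements Externalizable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        @Override
        public void writeExternal(final ObjectOutput out) {
            // nothing to write
        }

        @Override
        public void readExternal(final ObjectInput in) {
            // nothing to read
        }
    }

    public static void main(final String[] args) {
        System.out.println("empty proxy size: " + serializedSize(EmptyProxy::new) + " bytes");
    }
}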
index ac849723e168b1b439c1a90dd4f4b9f87504a07d..e63fa3b72dbbe74ff6a9578b9485029486e763a4 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class PurgeTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
-    private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
-            return TransactionIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected PurgeTransactionPayload createObject(final TransactionIdentifier identifier,
-                final byte[] serialized) {
-            return new PurgeTransactionPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(PurgeTransactionPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(PT::new);
 
     PurgeTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
         super(transactionId, serialized);
@@ -62,13 +37,18 @@ public final class PurgeTransactionPayload extends AbstractIdentifiablePayload<T
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", transactionId, e);
-            throw new RuntimeException("Failed to serialize " + transactionId, e);
+            throw new IllegalStateException("Failed to serialize " + transactionId, e);
         }
         return new PurgeTransactionPayload(transactionId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected PT externalizableProxy(final byte[] serialized) {
+        return new PT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java
new file mode 100644 (file)
index 0000000..dc39f5c
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+
+/**
+ * Serialization proxy for {@link ShardManagerSnapshot}.
+ */
+final class SM implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardManagerSnapshot snapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SM() {
+        // For Externalizable
+    }
+
+    SM(final ShardManagerSnapshot snapshot) {
+        this.snapshot = requireNonNull(snapshot);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final int size = in.readInt();
+        final var shardList = new ArrayList<String>(size);
+        for (int i = 0; i < size; i++) {
+            shardList.add((String) in.readObject());
+        }
+        snapshot = new ShardManagerSnapshot(shardList);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        final var shardList = snapshot.getShardList();
+        out.writeInt(shardList.size());
+        for (var shardName : shardList) {
+            out.writeObject(shardName);
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshot);
+    }
+}
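
SM, together with SS and ST below, follows the Java serialization-proxy pattern: the enclosing class's writeReplace() puts a compact proxy on the stream, and the proxy's readResolve() hands back the rebuilt real object on deserialization. The self-contained sketch below walks that round trip with hypothetical DemoSnapshot/DS classes; it mirrors the shape of ShardManagerSnapshot and SM but is not the controller code itself.

// Self-contained sketch of the writeReplace/readResolve proxy pattern used by SM, SS and ST.
// DemoSnapshot and DS are illustrative names, not part of the controller codebase.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

final class DemoSnapshot implements Serializable {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private final List<String> shardList;

    DemoSnapshot(final List<String> shardList) {
        this.shardList = List.copyOf(shardList);
    }

    List<String> getShardList() {
        return shardList;
    }

    @java.io.Serial
    private Object writeReplace() {
        // The proxy, not this object, ends up in the stream.
        return new DS(this);
    }

    public static final class DS implements Externalizable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private DemoSnapshot snapshot;

        public DS() {
            // For Externalizable
        }

        DS(final DemoSnapshot snapshot) {
            this.snapshot = snapshot;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            final var shards = snapshot.getShardList();
            out.writeInt(shards.size());
            for (var shard : shards) {
                out.writeUTF(shard);
            }
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            final int size = in.readInt();
            final var shards = new ArrayList<String>(size);
            for (int i = 0; i < size; i++) {
                shards.add(in.readUTF());
            }
            snapshot = new DemoSnapshot(shards);
        }

        @java.io.Serial
        private Object readResolve() {
            // The rebuilt real object replaces the proxy on the reader side.
            return snapshot;
        }
    }

    public static void main(final String[] args) throws IOException, ClassNotFoundException {
        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            oos.writeObject(new DemoSnapshot(List.of("inventory", "topology")));
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            final var restored = (DemoSnapshot) ois.readObject();
            System.out.println(restored.getShardList());  // [inventory, topology]
        }
    }
}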
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java
new file mode 100644 (file)
index 0000000..f719e1b
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link ShardSnapshotState}.
+ */
+final class SS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardSnapshotState snapshotState;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SS() {
+        // For Externalizable
+    }
+
+    SS(final ShardSnapshotState snapshotState) {
+        this.snapshotState = requireNonNull(snapshotState);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        snapshotState = ShardDataTreeSnapshot.deserialize(in);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        snapshotState.getSnapshot().serialize(out);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshotState);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java
new file mode 100644 (file)
index 0000000..ef082c7
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+
+/**
+ * Serialization proxy for {@link SkipTransactionsPayload}.
+ */
+final class ST implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ImmutableUnsignedLongSet transactionIds;
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ST() {
+        // For Externalizable
+    }
+
+    ST(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+
+        final var in = ByteStreams.newDataInput(newBytes);
+        identifier = LocalHistoryIdentifier.readFrom(in);
+        transactionIds = verifyNotNull(ImmutableUnsignedLongSet.readFrom(in));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new SkipTransactionsPayload(identifier, bytes, transactionIds);
+    }
+}
index 728dd29ea3232c081dc0f5f0930f7185c514f287..86d293528a9dab2bdecf7c2b30800dd707b271a7 100644 (file)
@@ -8,12 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.collect.ImmutableList;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.eclipse.jdt.annotation.NonNull;
 
@@ -22,49 +17,10 @@ import org.eclipse.jdt.annotation.NonNull;
  *
  * @author Thomas Pantelis
  */
-public class ShardManagerSnapshot implements Serializable {
+public final class ShardManagerSnapshot implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ShardManagerSnapshot snapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ShardManagerSnapshot snapshot) {
-            this.snapshot = snapshot;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(snapshot.shardList.size());
-            for (String shard: snapshot.shardList) {
-                out.writeObject(shard);
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            int size = in.readInt();
-            List<String> localShardList = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                localShardList.add((String) in.readObject());
-            }
-
-            snapshot = new ShardManagerSnapshot(localShardList);
-        }
-
-        private Object readResolve() {
-            return snapshot;
-        }
-    }
-
     private final List<String> shardList;
 
     public ShardManagerSnapshot(final @NonNull List<String> shardList) {
@@ -72,11 +28,12 @@ public class ShardManagerSnapshot implements Serializable {
     }
 
     public List<String> getShardList() {
-        return this.shardList;
+        return shardList;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new SM(this);
     }
 
     @Override
index a294584227e92765fa84e3ebfe4a6cb6bc9aa5d3..c06c5cf3189d404d38271ef094eccb4204131481 100644 (file)
@@ -11,10 +11,6 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.VisibleForTesting;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 
@@ -23,40 +19,10 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  *
  * @author Thomas Pantelis
  */
-public class ShardSnapshotState implements Snapshot.State {
+public final class ShardSnapshotState implements Snapshot.State {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ShardSnapshotState snapshotState;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ShardSnapshotState snapshotState) {
-            this.snapshotState = snapshotState;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            snapshotState.snapshot.serialize(out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            snapshotState = ShardDataTreeSnapshot.deserialize(in);
-        }
-
-        private Object readResolve() {
-            return snapshotState;
-        }
-    }
-
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
             + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
             + "aren't serialized. FindBugs does not recognize this.")
@@ -82,7 +48,8 @@ public class ShardSnapshotState implements Snapshot.State {
         return migrated;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new SS(this);
     }
 }
index ec6e227a75a91bf672fb081a796c381b14754b48..a8fb52c4b97d06f1c91b3a28b7c03d7175a6685f 100644 (file)
@@ -7,12 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.io.ByteStreams;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.DataInput;
 import java.io.IOException;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
@@ -26,43 +24,15 @@ import org.slf4j.LoggerFactory;
  * local history.
  */
 public final class SkipTransactionsPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        private ImmutableUnsignedLongSet transactionIds;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            final var id = LocalHistoryIdentifier.readFrom(in);
-            transactionIds = ImmutableUnsignedLongSet.readFrom(in);
-            return id;
-        }
-
-        @Override
-        protected SkipTransactionsPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new SkipTransactionsPayload(identifier, serialized, verifyNotNull(transactionIds));
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(SkipTransactionsPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(ST::new);
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via externalizable proxy")
     private final @NonNull ImmutableUnsignedLongSet transactionIds;
 
-    private SkipTransactionsPayload(final @NonNull LocalHistoryIdentifier historyId,
+    SkipTransactionsPayload(final @NonNull LocalHistoryIdentifier historyId,
             final byte @NonNull [] serialized, final ImmutableUnsignedLongSet transactionIds) {
         super(historyId, serialized);
         this.transactionIds = requireNonNull(transactionIds);
@@ -77,7 +47,7 @@ public final class SkipTransactionsPayload extends AbstractIdentifiablePayload<L
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {} ids {}", historyId, transactionIds, e);
-            throw new RuntimeException("Failed to serialize " + historyId + " ids " + transactionIds, e);
+            throw new IllegalStateException("Failed to serialize " + historyId + " ids " + transactionIds, e);
         }
 
         return new SkipTransactionsPayload(historyId, out.toByteArray(), transactionIds);
@@ -88,7 +58,12 @@ public final class SkipTransactionsPayload extends AbstractIdentifiablePayload<L
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected ST externalizableProxy(final byte[] serialized) {
+        return new ST(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
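
SkipTransactionsPayload keeps its identifier and transaction-id set flattened into a byte[] (written with Guava's ByteArrayDataOutput), and the ST proxy re-reads both from that array in readExternal(). The sketch below shows the same encode/decode shape with Guava ByteStreams, assuming Guava is on the classpath; the long-valued history id and TreeSet<Long> are simplifications standing in for LocalHistoryIdentifier and ImmutableUnsignedLongSet, which have their own readFrom/writeTo formats.

// Hedged sketch of the encode/decode scheme SkipTransactionsPayload and its ST proxy use:
// an identifier followed by a set of transaction ids, flattened into a byte[].
import com.google.common.io.ByteStreams;
import java.util.Set;
import java.util.TreeSet;

final class SkipTransactionsCodec {
    private SkipTransactionsCodec() {
        // utility class
    }

    static byte[] encode(final long historyId, final Set<Long> transactionIds) {
        final var out = ByteStreams.newDataOutput();
        out.writeLong(historyId);
        out.writeInt(transactionIds.size());
        for (long txId : transactionIds) {
            out.writeLong(txId);
        }
        return out.toByteArray();
    }

    static void decode(final byte[] bytes) {
        final var in = ByteStreams.newDataInput(bytes);
        final long historyId = in.readLong();
        final int size = in.readInt();
        final var transactionIds = new TreeSet<Long>();
        for (int i = 0; i < size; i++) {
            transactionIds.add(in.readLong());
        }
        System.out.println("history " + historyId + " skips " + transactionIds);
    }

    public static void main(final String[] args) {
        decode(encode(42L, new TreeSet<>(Set.of(1L, 2L, 5L))));
    }
}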
index d28734bca3dff55792d3c2f433f6e2838b0291e1..560b5344b0052bd09e455b396780cc5579211097 100644 (file)
@@ -18,9 +18,10 @@ import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 public abstract class AbstractShardManagerCreator<T extends AbstractShardManagerCreator<T>> {
-    private SettableFuture<Void> readinessFuture;
+    private SettableFuture<Empty> readinessFuture;
     private ClusterWrapper cluster;
     private Configuration configuration;
     private DatastoreContextFactory datastoreContextFactory;
@@ -82,11 +83,11 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return self();
     }
 
-    SettableFuture<Void> getReadinessFuture() {
+    SettableFuture<Empty> getReadinessFuture() {
         return readinessFuture;
     }
 
-    public T readinessFuture(final SettableFuture<Void> newReadinessFuture) {
+    public T readinessFuture(final SettableFuture<Empty> newReadinessFuture) {
         checkSealed();
         this.readinessFuture = newReadinessFuture;
         return self();
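
The readiness future changes from SettableFuture<Void> to SettableFuture<Empty>, so completion is signalled with Empty.value() instead of null (see the checkReady() hunk further below). A minimal sketch of that signalling, assuming Guava and the yangtools yang-common artifact (for Empty) are available; the callback wiring is illustrative, not how the datastore actually consumes the future.

// Minimal sketch of a SettableFuture<Empty> readiness signal, completed with Empty.value().
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import org.opendaylight.yangtools.yang.common.Empty;

final class ReadinessExample {
    private ReadinessExample() {
        // utility class
    }

    public static void main(final String[] args) {
        final SettableFuture<Empty> readinessFuture = SettableFuture.create();

        Futures.addCallback(readinessFuture, new FutureCallback<Empty>() {
            @Override
            public void onSuccess(final Empty result) {
                System.out.println("data store is ready");
            }

            @Override
            public void onFailure(final Throwable cause) {
                System.err.println("readiness failed: " + cause);
            }
        }, MoreExecutors.directExecutor());

        // What checkReady() does once all shards report a leader:
        readinessFuture.set(Empty.value());
    }
}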
index 71b02f3223a3622937aa450776310859187724ee..6a8e392b96891bb0b1bbfeef127b20ac24060f33 100644 (file)
@@ -7,17 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
 import java.util.concurrent.atomic.AtomicReference;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
 
-final class AtomicShardContextProvider extends AtomicReference<EffectiveModelContext>
-        implements EffectiveModelContextProvider {
+final class AtomicShardContextProvider extends AtomicReference<EffectiveModelContext> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @Override
-    public EffectiveModelContext getEffectiveModelContext() {
-        return Verify.verifyNotNull(get());
+    @NonNull EffectiveModelContext modelContext() {
+        return verifyNotNull(get());
     }
 }
\ No newline at end of file
index cc3d5a90c4cdf028a2112c90fbeb9eae40a765d9..f5c94413c7840cb1247e9f36cb3d59358334beae 100644 (file)
@@ -28,7 +28,7 @@ import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolve
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardInitialized;
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardReady;
 import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -78,7 +78,7 @@ public final class ShardInformation {
 
     Props newProps() {
         Props props = requireNonNull(builder).id(shardId).peerAddresses(initialPeerAddresses)
-                .datastoreContext(datastoreContext).schemaContextProvider(schemaContextProvider).props();
+                .datastoreContext(datastoreContext).schemaContextProvider(schemaContextProvider::modelContext).props();
         builder = null;
         return props;
     }
@@ -100,8 +100,8 @@ public final class ShardInformation {
         return shardId;
     }
 
-    void setLocalDataTree(final Optional<ReadOnlyDataTree> dataTree) {
-        localShardDataTree = dataTree;
+    void setLocalDataTree(final ReadOnlyDataTree dataTree) {
+        localShardDataTree = Optional.ofNullable(dataTree);
     }
 
     Optional<ReadOnlyDataTree> getLocalShardDataTree() {
@@ -168,10 +168,10 @@ public final class ShardInformation {
             return;
         }
 
-        boolean ready = isShardReadyWithLeaderId();
-
-        LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
-            ready ? "ready" : "initialized", onShardInitializedSet.size());
+        final boolean ready = isShardReadyWithLeaderId();
+        final String readyStr = ready ? "ready" : "initialized";
+        LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId, readyStr,
+            onShardInitializedSet.size());
 
         Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
         while (iter.hasNext()) {
@@ -256,7 +256,7 @@ public final class ShardInformation {
     }
 
     EffectiveModelContext getSchemaContext() {
-        return schemaContextProvider.getEffectiveModelContext();
+        return schemaContextProvider.modelContext();
     }
 
     void setSchemaContext(final EffectiveModelContext schemaContext) {
index 85469b27e8b4fec4fec619c8cce8126bb4ca3c8a..adc686723bd67fc602af2c19005fb5a44358284f 100644 (file)
@@ -5,10 +5,8 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
-import static akka.pattern.Patterns.ask;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
@@ -39,7 +37,6 @@ import com.google.common.util.concurrent.SettableFuture;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -104,6 +101,7 @@ import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
 import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -144,14 +142,14 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private DatastoreContextFactory datastoreContextFactory;
 
-    private final SettableFuture<Void> readinessFuture;
+    private final SettableFuture<Empty> readinessFuture;
 
     private final PrimaryShardInfoFutureCache primaryShardInfoCache;
 
     @VisibleForTesting
     final ShardPeerAddressResolver peerAddressResolver;
 
-    private EffectiveModelContext schemaContext;
+    private EffectiveModelContext modelContext;
 
     private DatastoreSnapshot restoreFromSnapshot;
 
@@ -165,16 +163,17 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private final String persistenceId;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     ShardManager(final AbstractShardManagerCreator<?> builder) {
-        this.cluster = builder.getCluster();
-        this.configuration = builder.getConfiguration();
-        this.datastoreContextFactory = builder.getDatastoreContextFactory();
-        this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
-        this.shardDispatcherPath =
-                new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
-        this.readinessFuture = builder.getReadinessFuture();
-        this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
-        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
+        cluster = builder.getCluster();
+        configuration = builder.getConfiguration();
+        datastoreContextFactory = builder.getDatastoreContextFactory();
+        type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
+        shardDispatcherPath = new Dispatchers(context().system().dispatchers())
+            .getDispatcherPath(Dispatchers.DispatcherType.Shard);
+        readinessFuture = builder.getReadinessFuture();
+        primaryShardInfoCache = builder.getPrimaryShardInfoCache();
+        restoreFromSnapshot = builder.getRestoreFromSnapshot();
 
         String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
         persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
@@ -185,7 +184,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         cluster.subscribeToMemberEvents(getSelf());
 
         shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
-                "shard-manager-" + this.type,
+                "shard-manager-" + type,
                 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
         shardManagerMBean.registerMBean();
     }
@@ -204,85 +203,80 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     @Override
     public void handleCommand(final Object message) throws Exception {
-        if (message  instanceof FindPrimary) {
-            findPrimary((FindPrimary)message);
-        } else if (message instanceof FindLocalShard) {
-            findLocalShard((FindLocalShard) message);
-        } else if (message instanceof UpdateSchemaContext) {
-            updateSchemaContext(message);
-        } else if (message instanceof ActorInitialized) {
-            onActorInitialized(message);
-        } else if (message instanceof ClusterEvent.MemberUp) {
-            memberUp((ClusterEvent.MemberUp) message);
-        } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
-            memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
-        } else if (message instanceof ClusterEvent.MemberExited) {
-            memberExited((ClusterEvent.MemberExited) message);
-        } else if (message instanceof ClusterEvent.MemberRemoved) {
-            memberRemoved((ClusterEvent.MemberRemoved) message);
-        } else if (message instanceof ClusterEvent.UnreachableMember) {
-            memberUnreachable((ClusterEvent.UnreachableMember) message);
-        } else if (message instanceof ClusterEvent.ReachableMember) {
-            memberReachable((ClusterEvent.ReachableMember) message);
-        } else if (message instanceof DatastoreContextFactory) {
-            onDatastoreContextFactory((DatastoreContextFactory) message);
-        } else if (message instanceof RoleChangeNotification) {
-            onRoleChangeNotification((RoleChangeNotification) message);
-        } else if (message instanceof FollowerInitialSyncUpStatus) {
-            onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
-        } else if (message instanceof ShardNotInitializedTimeout) {
-            onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
-        } else if (message instanceof ShardLeaderStateChanged) {
-            onLeaderStateChanged((ShardLeaderStateChanged) message);
-        } else if (message instanceof SwitchShardBehavior) {
-            onSwitchShardBehavior((SwitchShardBehavior) message);
-        } else if (message instanceof CreateShard) {
-            onCreateShard((CreateShard)message);
-        } else if (message instanceof AddShardReplica) {
-            onAddShardReplica((AddShardReplica) message);
-        } else if (message instanceof ForwardedAddServerReply) {
-            ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
-            onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
-                    msg.removeShardOnFailure);
-        } else if (message instanceof ForwardedAddServerFailure) {
-            ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
+        if (message instanceof FindPrimary msg) {
+            findPrimary(msg);
+        } else if (message instanceof FindLocalShard msg) {
+            findLocalShard(msg);
+        } else if (message instanceof UpdateSchemaContext msg) {
+            updateSchemaContext(msg);
+        } else if (message instanceof ActorInitialized msg) {
+            onActorInitialized(msg);
+        } else if (message instanceof ClusterEvent.MemberUp msg) {
+            memberUp(msg);
+        } else if (message instanceof ClusterEvent.MemberWeaklyUp msg) {
+            memberWeaklyUp(msg);
+        } else if (message instanceof ClusterEvent.MemberExited msg) {
+            memberExited(msg);
+        } else if (message instanceof ClusterEvent.MemberRemoved msg) {
+            memberRemoved(msg);
+        } else if (message instanceof ClusterEvent.UnreachableMember msg) {
+            memberUnreachable(msg);
+        } else if (message instanceof ClusterEvent.ReachableMember msg) {
+            memberReachable(msg);
+        } else if (message instanceof DatastoreContextFactory msg) {
+            onDatastoreContextFactory(msg);
+        } else if (message instanceof RoleChangeNotification msg) {
+            onRoleChangeNotification(msg);
+        } else if (message instanceof FollowerInitialSyncUpStatus msg) {
+            onFollowerInitialSyncStatus(msg);
+        } else if (message instanceof ShardNotInitializedTimeout msg) {
+            onShardNotInitializedTimeout(msg);
+        } else if (message instanceof ShardLeaderStateChanged msg) {
+            onLeaderStateChanged(msg);
+        } else if (message instanceof SwitchShardBehavior msg) {
+            onSwitchShardBehavior(msg);
+        } else if (message instanceof CreateShard msg) {
+            onCreateShard(msg);
+        } else if (message instanceof AddShardReplica msg) {
+            onAddShardReplica(msg);
+        } else if (message instanceof ForwardedAddServerReply msg) {
+            onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath, msg.removeShardOnFailure);
+        } else if (message instanceof ForwardedAddServerFailure msg) {
             onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
-        } else if (message instanceof RemoveShardReplica) {
-            onRemoveShardReplica((RemoveShardReplica) message);
-        } else if (message instanceof WrappedShardResponse) {
-            onWrappedShardResponse((WrappedShardResponse) message);
-        } else if (message instanceof GetSnapshot) {
-            onGetSnapshot((GetSnapshot) message);
-        } else if (message instanceof ServerRemoved) {
-            onShardReplicaRemoved((ServerRemoved) message);
-        } else if (message instanceof ChangeShardMembersVotingStatus) {
-            onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
-        } else if (message instanceof FlipShardMembersVotingStatus) {
-            onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
-        } else if (message instanceof SaveSnapshotSuccess) {
-            onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
-        } else if (message instanceof SaveSnapshotFailure) {
-            LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
-                    ((SaveSnapshotFailure) message).cause());
+        } else if (message instanceof RemoveShardReplica msg) {
+            onRemoveShardReplica(msg);
+        } else if (message instanceof WrappedShardResponse msg) {
+            onWrappedShardResponse(msg);
+        } else if (message instanceof GetSnapshot msg) {
+            onGetSnapshot(msg);
+        } else if (message instanceof ServerRemoved msg) {
+            onShardReplicaRemoved(msg);
+        } else if (message instanceof ChangeShardMembersVotingStatus msg) {
+            onChangeShardServersVotingStatus(msg);
+        } else if (message instanceof FlipShardMembersVotingStatus msg) {
+            onFlipShardMembersVotingStatus(msg);
+        } else if (message instanceof SaveSnapshotSuccess msg) {
+            onSaveSnapshotSuccess(msg);
+        } else if (message instanceof SaveSnapshotFailure msg) {
+            LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(), msg.cause());
         } else if (message instanceof Shutdown) {
             onShutDown();
         } else if (message instanceof GetLocalShardIds) {
             onGetLocalShardIds();
-        } else if (message instanceof GetShardRole) {
-            onGetShardRole((GetShardRole) message);
-        } else if (message instanceof RunnableMessage) {
-            ((RunnableMessage)message).run();
-        } else if (message instanceof RegisterForShardAvailabilityChanges) {
-            onRegisterForShardAvailabilityChanges((RegisterForShardAvailabilityChanges)message);
-        } else if (message instanceof DeleteSnapshotsFailure) {
-            LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
-                    ((DeleteSnapshotsFailure) message).cause());
+        } else if (message instanceof GetShardRole msg) {
+            onGetShardRole(msg);
+        } else if (message instanceof RunnableMessage msg) {
+            msg.run();
+        } else if (message instanceof RegisterForShardAvailabilityChanges msg) {
+            onRegisterForShardAvailabilityChanges(msg);
+        } else if (message instanceof DeleteSnapshotsFailure msg) {
+            LOG.warn("{}: Failed to delete prior snapshots", persistenceId(), msg.cause());
         } else if (message instanceof DeleteSnapshotsSuccess) {
             LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
         } else if (message instanceof RegisterRoleChangeListenerReply) {
             LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
-        } else if (message instanceof ClusterEvent.MemberEvent) {
-            LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), message);
+        } else if (message instanceof ClusterEvent.MemberEvent msg) {
+            LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), msg);
         } else {
             unknownMessage(message);
         }
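
The handleCommand() rewrite above switches the long dispatch chain to Java 16 pattern matching for instanceof: "message instanceof FindPrimary msg" tests and casts in one step, removing the explicit casts. A tiny standalone sketch of that style, using hypothetical record-based messages rather than the real actor messages:

// Standalone sketch of the instanceof pattern-matching style adopted by handleCommand().
// The nested records are hypothetical; the real ShardManager messages are actor messages.
final class DispatchExample {
    record FindPrimary(String shardName) { }

    record FindLocalShard(String shardName) { }

    private DispatchExample() {
        // utility class
    }

    static String handle(final Object message) {
        // Before Java 16 each branch needed an instanceof check followed by an explicit cast;
        // the pattern variable 'msg' now carries the cast.
        if (message instanceof FindPrimary msg) {
            return "find primary for " + msg.shardName();
        } else if (message instanceof FindLocalShard msg) {
            return "find local shard " + msg.shardName();
        } else {
            return "unknown message " + message;
        }
    }

    public static void main(final String[] args) {
        System.out.println(handle(new FindPrimary("inventory")));
        System.out.println(handle(new FindLocalShard("topology")));
    }
}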
@@ -384,8 +378,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void removeShardReplica(final RemoveShardReplica contextMessage, final String shardName,
             final String primaryPath, final ActorRef sender) {
         if (isShardReplicaOperationInProgress(shardName, sender)) {
@@ -403,7 +395,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 primaryPath, shardId);
 
         Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
-        Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
+        Future<Object> futureObj = Patterns.ask(getContext().actorSelection(primaryPath),
                 new RemoveServer(shardId.toString()), removeServerTimeout);
 
         futureObj.onComplete(new OnComplete<>() {
@@ -531,8 +523,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-        justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private boolean isPreviousShardActorStopInProgress(final String shardName, final Object messageToDefer) {
         final CompositeOnComplete<Boolean> stopOnComplete = shardActorsStopping.get(shardName);
         if (stopOnComplete == null) {
@@ -584,7 +574,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             // the shard with no peers and with elections disabled so it stays as follower. A
             // subsequent AddServer request will be needed to make it an active member.
             isActiveMember = false;
-            peerAddresses = Collections.emptyMap();
+            peerAddresses = Map.of();
             shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
                     .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
         }
@@ -598,8 +588,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         info.setActiveMember(isActiveMember);
         localShards.put(info.getShardName(), info);
 
-        if (schemaContext != null) {
-            info.setSchemaContext(schemaContext);
+        if (modelContext != null) {
+            info.setSchemaContext(modelContext);
             info.setActor(newShardActor(info));
         }
     }
@@ -616,7 +606,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     private void checkReady() {
         if (isReadyWithLeaderId()) {
             LOG.info("{}: All Shards are ready - data store {} is ready", persistenceId(), type);
-            readinessFuture.set(null);
+            readinessFuture.set(Empty.value());
         }
     }
 
@@ -625,7 +615,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
         if (shardInformation != null) {
-            shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
+            shardInformation.setLocalDataTree(leaderStateChanged.localShardDataTree());
             shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
             if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
                 primaryShardInfoCache.remove(shardInformation.getShardName());
@@ -717,13 +707,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         return true;
     }
 
-    private void onActorInitialized(final Object message) {
-        final ActorRef sender = getSender();
-
-        if (sender == null) {
-            // why is a non-actor sending this message? Just ignore.
-            return;
-        }
+    private void onActorInitialized(final ActorInitialized message) {
+        final var sender = message.actorRef();
 
         String actorName = sender.path().name();
         //find shard name from actor name; actor name is stringified shardId
@@ -754,8 +739,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     protected void handleRecover(final Object message) throws Exception {
         if (message instanceof RecoveryCompleted) {
             onRecoveryCompleted();
-        } else if (message instanceof SnapshotOffer) {
-            applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
+        } else if (message instanceof SnapshotOffer msg) {
+            applyShardManagerSnapshot((ShardManagerSnapshot) msg.snapshot());
         }
     }
 
@@ -978,13 +963,13 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
      *
      * @param message the message to send
      */
-    private void updateSchemaContext(final Object message) {
-        schemaContext = ((UpdateSchemaContext) message).getEffectiveModelContext();
+    private void updateSchemaContext(final UpdateSchemaContext message) {
+        modelContext = message.modelContext();
 
-        LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getModules().size());
+        LOG.debug("Got updated SchemaContext: # of modules {}", modelContext.getModules().size());
 
         for (ShardInformation info : localShards.values()) {
-            info.setSchemaContext(schemaContext);
+            info.setSchemaContext(modelContext);
 
             if (info.getActor() == null) {
                 LOG.debug("Creating Shard {}", info.getShardId());
@@ -1028,7 +1013,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             sendResponse(info, message.isWaitUntilReady(), true, () -> {
                 String primaryPath = info.getSerializedLeaderActor();
                 Object found = canReturnLocalShardState && info.isLeader()
-                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
+                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().orElseThrow()) :
                             new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
 
                 LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
@@ -1070,20 +1055,18 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                 .getShardInitializationTimeout().duration().$times(2));
 
-        Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
+        Future<Object> futureObj = Patterns.ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
         futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     handler.onFailure(failure);
+                } else if (response instanceof RemotePrimaryShardFound msg) {
+                    handler.onRemotePrimaryShardFound(msg);
+                } else if (response instanceof LocalPrimaryShardFound msg) {
+                    handler.onLocalPrimaryFound(msg);
                 } else {
-                    if (response instanceof RemotePrimaryShardFound) {
-                        handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
-                    } else if (response instanceof LocalPrimaryShardFound) {
-                        handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
-                    } else {
-                        handler.onUnknownResponse(response);
-                    }
+                    handler.onUnknownResponse(response);
                 }
             }
         }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
@@ -1105,8 +1088,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
      * Create shards that are local to the member on which the ShardManager runs.
      */
     private void createLocalShards() {
-        MemberName memberName = this.cluster.getCurrentMemberName();
-        Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
+        MemberName memberName = cluster.getCurrentMemberName();
+        Collection<String> memberShardNames = configuration.getMemberShardNames(memberName);
 
         Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
         if (restoreFromSnapshot != null) {
@@ -1151,7 +1134,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private Map<String, String> getPeerAddresses(final String shardName, final Collection<MemberName> members) {
         Map<String, String> peerAddresses = new HashMap<>();
-        MemberName currentMemberName = this.cluster.getCurrentMemberName();
+        MemberName currentMemberName = cluster.getCurrentMemberName();
 
         for (MemberName memberName : members) {
             if (!currentMemberName.equals(memberName)) {
@@ -1200,7 +1183,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);
 
         // verify the shard with the specified name is present in the cluster configuration
-        if (!this.configuration.isShardConfigured(shardName)) {
+        if (!configuration.isShardConfigured(shardName)) {
             LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
             getSender().tell(new Status.Failure(new IllegalArgumentException(
                 "No module configuration exists for shard " + shardName)), getSelf());
@@ -1208,7 +1191,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
 
         // Create the localShard
-        if (schemaContext == null) {
+        if (modelContext == null) {
             LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
                 persistenceId(), shardName);
             getSender().tell(new Status.Failure(new IllegalStateException(
@@ -1235,16 +1218,12 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         });
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
         LOG.debug("{}: Local shard {} already exists", persistenceId(), shardName);
         sender.tell(new Status.Failure(new AlreadyExistsException(
             String.format("Local shard %s already exists", shardName))), getSelf());
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
         if (isShardReplicaOperationInProgress(shardName, sender)) {
             return;
@@ -1265,7 +1244,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                     Shard.builder(), peerAddressResolver);
             shardInfo.setActiveMember(false);
-            shardInfo.setSchemaContext(schemaContext);
+            shardInfo.setSchemaContext(modelContext);
             localShards.put(shardName, shardInfo);
             shardInfo.setActor(newShardActor(shardInfo));
         } else {
@@ -1291,7 +1270,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         final Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
                 .getShardLeaderElectionTimeout().duration());
-        final Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
+        final Future<Object> futureObj = Patterns.ask(getContext().actorSelection(response.getPrimaryPath()),
                 new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);
 
         futureObj.onComplete(new OnComplete<>() {
@@ -1358,21 +1337,18 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private static Exception getServerChangeException(final Class<?> serverChange,
             final ServerChangeStatus serverChangeStatus, final String leaderPath, final ShardIdentifier shardId) {
-        switch (serverChangeStatus) {
-            case TIMEOUT:
-                return new TimeoutException(String.format(
-                        "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
-                        + "Possible causes - there was a problem replicating the data or shard leadership changed "
-                        + "while replicating the shard data", leaderPath, shardId.getShardName()));
-            case NO_LEADER:
-                return new NoShardLeaderException(shardId);
-            case NOT_SUPPORTED:
-                return new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
-                        serverChange.getSimpleName(), shardId.getShardName()));
-            default :
-                return new RuntimeException(String.format("%s request to leader %s for shard %s failed with status %s",
-                        serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
-        }
+        return switch (serverChangeStatus) {
+            case TIMEOUT -> new TimeoutException("""
+                The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible \
+                causes - there was a problem replicating the data or shard leadership changed while replicating the \
+                shard data""".formatted(leaderPath, shardId.getShardName()));
+            case NO_LEADER -> new NoShardLeaderException(shardId);
+            case NOT_SUPPORTED -> new UnsupportedOperationException(
+                "%s request is not supported for shard %s".formatted(
+                    serverChange.getSimpleName(), shardId.getShardName()));
+            default -> new RuntimeException("%s request to leader %s for shard %s failed with status %s".formatted(
+                serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
+        };
     }
 
     private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
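
getServerChangeException() now builds its TIMEOUT message with a Java text block and String.formatted() instead of String.format() concatenation; the trailing backslashes keep the rendered message on a single line. A small sketch of that formatting style with made-up values:

// Small sketch of the text-block + String.formatted() style used in getServerChangeException().
// The leaderPath/shardName values are made up for illustration.
final class MessageFormattingExample {
    private MessageFormattingExample() {
        // utility class
    }

    public static void main(final String[] args) {
        final String leaderPath = "akka://cluster/user/shardmanager/member-1-shard-inventory";
        final String shardName = "inventory";

        // The trailing backslash joins lines without embedding a newline in the result.
        final String message = """
            The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible \
            causes - there was a problem replicating the data or shard leadership changed while replicating the \
            shard data""".formatted(leaderPath, shardName);

        System.out.println(message);
    }
}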
@@ -1467,7 +1443,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         ActorRef sender = getSender();
         final String shardName = flipMembersVotingStatus.getShardName();
         findLocalShard(shardName, sender, localShardFound -> {
-            Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
+            Future<Object> future = Patterns.ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
                     Timeout.apply(30, TimeUnit.SECONDS));
 
             future.onComplete(new OnComplete<>() {
@@ -1518,31 +1494,27 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                 .getShardInitializationTimeout().duration().$times(2));
 
-        Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
+        Future<Object> futureObj = Patterns.ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
         futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
-                            failure);
+                        failure);
                     sender.tell(new Status.Failure(new RuntimeException(
-                            String.format("Failed to find local shard %s", shardName), failure)), self());
+                        String.format("Failed to find local shard %s", shardName), failure)), self());
+                } else if (response instanceof LocalShardFound msg) {
+                    getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept(msg), sender);
+                } else if (response instanceof LocalShardNotFound) {
+                    LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
+                    sender.tell(new Status.Failure(new IllegalArgumentException(
+                        String.format("Local shard %s does not exist", shardName))), self());
                 } else {
-                    if (response instanceof LocalShardFound) {
-                        getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
-                                sender);
-                    } else if (response instanceof LocalShardNotFound) {
-                        LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
-                        sender.tell(new Status.Failure(new IllegalArgumentException(
-                            String.format("Local shard %s does not exist", shardName))), self());
-                    } else {
-                        LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
-                            response);
-                        sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
-                                : new RuntimeException(
-                                    String.format("Failed to find local shard %s: received response: %s", shardName,
-                                        response))), self());
-                    }
+                    LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
+                        response);
+                    sender.tell(new Status.Failure(response instanceof Throwable throwable ? throwable
+                        : new RuntimeException(String.format("Failed to find local shard %s: received response: %s",
+                            shardName, response))), self());
                 }
             }
         }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
@@ -1563,7 +1535,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 changeServersVotingStatus, shardActorRef.path());
 
         Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
-        Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
+        Future<Object> futureObj = Patterns.ask(shardActorRef, changeServersVotingStatus, timeout);
 
         futureObj.onComplete(new OnComplete<>() {
             @Override
index 244b0a186cdaba4b3400a5be8dcc3d08e08d30ae..8577a5914c0389905f9f55d4fff8fb496ac91cff 100644 (file)
@@ -11,6 +11,7 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.pattern.Patterns;
+import com.google.common.base.Throwables;
 import java.util.List;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
@@ -47,10 +48,9 @@ final class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoM
         try {
             return (List<String>) Await.result(
                 Patterns.ask(shardManager, GetLocalShardIds.INSTANCE, ASK_TIMEOUT_MILLIS), Duration.Inf());
-        } catch (RuntimeException e) {
-            throw e;
         } catch (Exception e) {
-            throw new RuntimeException(e);
+            Throwables.throwIfUnchecked(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -80,10 +80,9 @@ final class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoM
                 try {
                     Await.result(Patterns.ask(shardManager, new SwitchShardBehavior(shardId, state, term),
                         ASK_TIMEOUT_MILLIS), Duration.Inf());
-                } catch (RuntimeException e) {
-                    throw e;
                 } catch (Exception e) {
-                    throw new RuntimeException(e);
+                    Throwables.throwIfUnchecked(e);
+                    throw new IllegalStateException(e);
                 }
                 break;
             case Candidate:
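The two hunks above drop the catch-and-rethrow-RuntimeException pattern in favour of Guava's Throwables.throwIfUnchecked. A minimal sketch of that idiom, separate from the project code: unchecked exceptions propagate untouched, checked ones get wrapped in an IllegalStateException.

    import com.google.common.base.Throwables;
    import java.util.concurrent.Callable;

    final class RethrowSketch {
        // Run a task, rethrowing RuntimeExceptions as-is and wrapping checked exceptions.
        static <T> T callUnchecked(final Callable<T> task) {
            try {
                return task.call();
            } catch (Exception e) {
                Throwables.throwIfUnchecked(e);
                throw new IllegalStateException(e);
            }
        }

        private RethrowSketch() {
            // utility class
        }
    }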
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java
deleted file mode 100644 (file)
index b990779..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Dell Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.shardmanager;
-
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.List;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * Persisted data of the ShardManager.
- *
- * @deprecated Use {@link org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot} instead.
- */
-// FIXME: 5.0.0: remove this class
-@Deprecated(forRemoval = true)
-final class ShardManagerSnapshot implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private final List<String> shardList;
-
-    ShardManagerSnapshot(final @NonNull List<String> shardList) {
-        this.shardList = ImmutableList.copyOf(shardList);
-    }
-
-    private Object readResolve() {
-        return new org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot(shardList);
-    }
-
-    @Override
-    public String toString() {
-        return "ShardManagerSnapshot [ShardList = " + shardList + " ]";
-    }
-}
index ae0938e600859af95c31d2d34a7db7f3abcb6583..d740461fdf11c9db988d96866f3ec42e26cada29 100644 (file)
@@ -20,6 +20,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractBatchedModificationsCursor extends AbstractDataTreeModificationCursor {
     protected abstract BatchedModifications getModifications();
 
index a9a646cc9f112e128e302bedc0457bcb814ce8bd..8e61c569faa422f32f7dd13c7bfac2479a5d5f00 100644 (file)
@@ -7,13 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.utils;
 
-import static akka.pattern.Patterns.ask;
-
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
-import akka.actor.Address;
 import akka.dispatch.Mapper;
 import akka.dispatch.OnComplete;
 import akka.pattern.AskTimeoutException;
@@ -23,7 +20,7 @@ import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.lang.invoke.VarHandle;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.LongAdder;
@@ -52,7 +49,7 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
 import org.opendaylight.controller.cluster.reporting.MetricsReporter;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -103,15 +100,13 @@ public class ActorUtils {
     private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER = new Mapper<>() {
         @Override
         public Throwable apply(final Throwable failure) {
-            Throwable actualFailure = failure;
             if (failure instanceof AskTimeoutException) {
                 // A timeout exception most likely means the shard isn't initialized.
-                actualFailure = new NotInitializedException(
+                return new NotInitializedException(
                         "Timed out trying to find the primary shard. Most likely cause is the "
                         + "shard is not initialized yet.");
             }
-
-            return actualFailure;
+            return failure;
         }
     };
     public static final String BOUNDED_MAILBOX = "bounded-mailbox";
@@ -134,10 +129,6 @@ public class ActorUtils {
 
     private volatile EffectiveModelContext schemaContext;
 
-    // Used as a write memory barrier.
-    @SuppressWarnings("unused")
-    private volatile boolean updated;
-
     private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN)
             .getMetricsRegistry();
 
@@ -158,13 +149,13 @@ public class ActorUtils {
         this.clusterWrapper = clusterWrapper;
         this.configuration = configuration;
         this.datastoreContext = datastoreContext;
-        this.dispatchers = new Dispatchers(actorSystem.dispatchers());
+        dispatchers = new Dispatchers(actorSystem.dispatchers());
         this.primaryShardInfoCache = primaryShardInfoCache;
-        this.shardStrategyFactory = new ShardStrategyFactory(configuration);
+        shardStrategyFactory = new ShardStrategyFactory(configuration);
 
         setCachedProperties();
 
-        Address selfAddress = clusterWrapper.getSelfAddress();
+        final var selfAddress = clusterWrapper.getSelfAddress();
         if (selfAddress != null && !selfAddress.host().isEmpty()) {
             selfAddressHostPort = selfAddress.host().get() + ":" + selfAddress.port().get();
         } else {
@@ -179,7 +170,7 @@ public class ActorUtils {
             TimeUnit.MILLISECONDS);
         operationTimeout = new Timeout(operationDuration);
 
-        transactionCommitOperationTimeout =  new Timeout(FiniteDuration.create(
+        transactionCommitOperationTimeout = new Timeout(FiniteDuration.create(
                 datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS));
 
         shardInitializationTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
@@ -214,17 +205,15 @@ public class ActorUtils {
     }
 
     public void setDatastoreContext(final DatastoreContextFactory contextFactory) {
-        this.datastoreContext = contextFactory.getBaseDatastoreContext();
+        datastoreContext = contextFactory.getBaseDatastoreContext();
         setCachedProperties();
 
-        // We write the 'updated' volatile to trigger a write memory barrier so that the writes above
-        // will be published immediately even though they may not be immediately visible to other
-        // threads due to unsynchronized reads. That's OK though - we're going for eventual
-        // consistency here as immediately visible updates to these members aren't critical. These
-        // members could've been made volatile but wanted to avoid volatile reads as these are
-        // accessed often and updates will be infrequent.
-
-        updated = true;
+        // Trigger a write memory barrier so that the writes above will be published immediately even though they may
+        // not be immediately visible to other threads due to unsynchronized reads. That is OK though - we are going for
+        // eventual consistency here as immediately visible updates to these members are not critical. These members
+        // could have been made volatile but wanted to avoid volatile reads as these are accessed often and updates will
+        // be infrequent.
+        VarHandle.fullFence();
 
         if (shardManager != null) {
             shardManager.tell(contextFactory, ActorRef.noSender());
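The replacement of the write-only 'updated' volatile with VarHandle.fullFence() above keeps the same publication behaviour without a spare field. A hedged sketch of the pattern with hypothetical field names; readers tolerate briefly stale values, which is why the fields stay non-volatile:

    import java.lang.invoke.VarHandle;

    final class CachedPropertiesSketch {
        // Deliberately non-volatile: read on hot paths, updated rarely.
        private long timeoutMillis;
        private String dispatcherName;

        void update(final long newTimeoutMillis, final String newDispatcherName) {
            timeoutMillis = newTimeoutMillis;
            dispatcherName = newDispatcherName;
            // Full fence: publishes the plain writes above to other threads.
            VarHandle.fullFence();
        }

        long timeoutMillis() {
            return timeoutMillis;
        }

        String dispatcherName() {
            return dispatcherName;
        }
    }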
@@ -236,45 +225,40 @@ public class ActorUtils {
     }
 
     public Future<PrimaryShardInfo> findPrimaryShardAsync(final String shardName) {
-        Future<PrimaryShardInfo> ret = primaryShardInfoCache.getIfPresent(shardName);
+        final var ret = primaryShardInfoCache.getIfPresent(shardName);
         if (ret != null) {
             return ret;
         }
-        Future<Object> future = executeOperationAsync(shardManager,
-                new FindPrimary(shardName, true), shardInitializationTimeout);
-
-        return future.transform(new Mapper<Object, PrimaryShardInfo>() {
-            @Override
-            public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
-                if (response instanceof RemotePrimaryShardFound) {
-                    LOG.debug("findPrimaryShardAsync received: {}", response);
-                    RemotePrimaryShardFound found = (RemotePrimaryShardFound)response;
-                    return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
-                } else if (response instanceof LocalPrimaryShardFound) {
-                    LOG.debug("findPrimaryShardAsync received: {}", response);
-                    LocalPrimaryShardFound found = (LocalPrimaryShardFound)response;
-                    return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
+
+        return executeOperationAsync(shardManager, new FindPrimary(shardName, true), shardInitializationTimeout)
+            .transform(new Mapper<>() {
+                @Override
+                public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
+                    if (response instanceof RemotePrimaryShardFound found) {
+                        LOG.debug("findPrimaryShardAsync received: {}", found);
+                        return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
+                    } else if (response instanceof LocalPrimaryShardFound found) {
+                        LOG.debug("findPrimaryShardAsync received: {}", found);
+                        return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
                             found.getLocalShardDataTree());
-                } else if (response instanceof NotInitializedException) {
-                    throw (NotInitializedException)response;
-                } else if (response instanceof PrimaryNotFoundException) {
-                    throw (PrimaryNotFoundException)response;
-                } else if (response instanceof NoShardLeaderException) {
-                    throw (NoShardLeaderException)response;
-                }
+                    } else if (response instanceof NotInitializedException notInitialized) {
+                        throw notInitialized;
+                    } else if (response instanceof PrimaryNotFoundException primaryNotFound) {
+                        throw primaryNotFound;
+                    } else if (response instanceof NoShardLeaderException noShardLeader) {
+                        throw noShardLeader;
+                    }
 
-                throw new UnknownMessageException(String.format(
+                    throw new UnknownMessageException(String.format(
                         "FindPrimary returned unkown response: %s", response));
-            }
-        }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
+                }
+            }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private PrimaryShardInfo onPrimaryShardFound(final String shardName, final String primaryActorPath,
             final short primaryVersion, final ReadOnlyDataTree localShardDataTree) {
-        ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
-        PrimaryShardInfo info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
+        final var actorSelection = actorSystem.actorSelection(primaryActorPath);
+        final var info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
             new PrimaryShardInfo(actorSelection, primaryVersion, localShardDataTree);
         primaryShardInfoCache.putSuccessful(shardName, info);
         return info;
@@ -288,10 +272,8 @@ public class ActorUtils {
      *         specified by the shardName
      */
     public Optional<ActorRef> findLocalShard(final String shardName) {
-        Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
-
-        if (result instanceof LocalShardFound) {
-            LocalShardFound found = (LocalShardFound) result;
+        final var result = executeOperation(shardManager, new FindLocalShard(shardName, false));
+        if (result instanceof LocalShardFound found) {
             LOG.debug("Local shard found {}", found.getPath());
             return Optional.of(found.getPath());
         }
@@ -306,27 +288,23 @@ public class ActorUtils {
      * @param shardName the name of the local shard that needs to be found
      */
     public Future<ActorRef> findLocalShardAsync(final String shardName) {
-        Future<Object> future = executeOperationAsync(shardManager,
-                new FindLocalShard(shardName, true), shardInitializationTimeout);
-
-        return future.map(new Mapper<Object, ActorRef>() {
-            @Override
-            public ActorRef checkedApply(final Object response) throws Throwable {
-                if (response instanceof LocalShardFound) {
-                    LocalShardFound found = (LocalShardFound)response;
-                    LOG.debug("Local shard found {}", found.getPath());
-                    return found.getPath();
-                } else if (response instanceof NotInitializedException) {
-                    throw (NotInitializedException)response;
-                } else if (response instanceof LocalShardNotFound) {
-                    throw new LocalShardNotFoundException(
+        return executeOperationAsync(shardManager, new FindLocalShard(shardName, true), shardInitializationTimeout)
+            .map(new Mapper<>() {
+                @Override
+                public ActorRef checkedApply(final Object response) throws Throwable {
+                    if (response instanceof LocalShardFound found) {
+                        LOG.debug("Local shard found {}", found.getPath());
+                        return found.getPath();
+                    } else if (response instanceof NotInitializedException) {
+                        throw (NotInitializedException)response;
+                    } else if (response instanceof LocalShardNotFound) {
+                        throw new LocalShardNotFoundException(
                             String.format("Local shard for %s does not exist.", shardName));
-                }
+                    }
 
-                throw new UnknownMessageException(String.format(
-                        "FindLocalShard returned unkown response: %s", response));
-            }
-        }, getClientDispatcher());
+                    throw new UnknownMessageException("FindLocalShard returned unkown response: " + response);
+                }
+            }, getClientDispatcher());
     }
 
     /**
@@ -422,7 +400,7 @@ public class ActorUtils {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void shutdown() {
-        FiniteDuration duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
+        final var duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
         try {
             Await.ready(Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE), duration);
         } catch (Exception e) {
@@ -444,15 +422,15 @@ public class ActorUtils {
     public void broadcast(final Function<Short, Object> messageSupplier, final Class<?> messageClass) {
         for (final String shardName : configuration.getAllShardNames()) {
 
-            Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
-            primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
+            final var primaryFuture = findPrimaryShardAsync(shardName);
+            primaryFuture.onComplete(new OnComplete<>() {
                 @Override
                 public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
                     if (failure != null) {
                         LOG.warn("broadcast failed to send message {} to shard {}", messageClass.getSimpleName(),
                             shardName, failure);
                     } else {
-                        Object message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
+                        final var message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
                         primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
                     }
                 }
@@ -486,7 +464,7 @@ public class ActorUtils {
                 return false;
             }
 
-            String hostPort = path.substring(pathAtIndex + 1, slashIndex);
+            final var hostPort = path.substring(pathAtIndex + 1, slashIndex);
             return hostPort.equals(selfAddressHostPort);
 
         } else {
@@ -507,8 +485,8 @@ public class ActorUtils {
     }
 
     public Timer getOperationTimer(final String dataStoreType, final String operationName) {
-        final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
-                operationName, METRIC_RATE);
+        final var rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType, operationName,
+            METRIC_RATE);
         return metricRegistry.timer(rate);
     }
 
@@ -561,11 +539,11 @@ public class ActorUtils {
      * @return the dispatcher
      */
     public ExecutionContext getClientDispatcher() {
-        return this.dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+        return dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
     }
 
     public String getNotificationDispatcherPath() {
-        return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+        return dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
     }
 
     public Configuration getConfiguration() {
@@ -577,11 +555,11 @@ public class ActorUtils {
     }
 
     protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
-        return ask(actorRef, message, timeout);
+        return Patterns.ask(actorRef, message, timeout);
     }
 
     protected Future<Object> doAsk(final ActorSelection actorRef, final Object message, final Timeout timeout) {
-        final Future<Object> ret = ask(actorRef, message, timeout);
+        final var ret = Patterns.ask(actorRef, message, timeout);
         ret.onComplete(askTimeoutCounter, askTimeoutCounter);
         return ret;
     }
index c298371044182d60758a1cb6c17073b474158b9c..5b073e856f6f163daf7af51f197f4b194ee04016 100644 (file)
@@ -16,7 +16,7 @@ import javax.xml.stream.XMLStreamException;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,7 +54,7 @@ public final class DataTreeModificationOutput {
                 output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
                 output.writeByte('\n');
             } catch (IOException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
 
@@ -78,7 +78,7 @@ public final class DataTreeModificationOutput {
                 NormalizedNodeXMLOutput.toStream(output, data);
                 output.writeByte('\n');
             } catch (IOException | XMLStreamException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
     }
index a78e36c811e93dc6e9f76352686170a0ed7fc8a3..4a17978f1ca6b81ff737eaa838886c82e29caa1e 100644 (file)
@@ -12,12 +12,12 @@ import java.util.Optional;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public final class NormalizedNodeAggregator {
@@ -30,10 +30,9 @@ public final class NormalizedNodeAggregator {
             final LogicalDatastoreType logicalDatastoreType) {
         this.rootIdentifier = rootIdentifier;
         this.nodes = nodes;
-        this.dataTree = new InMemoryDataTreeFactory().create(
-            logicalDatastoreType == LogicalDatastoreType.CONFIGURATION ? DataTreeConfiguration.DEFAULT_CONFIGURATION
-                    : DataTreeConfiguration.DEFAULT_OPERATIONAL);
-        this.dataTree.setEffectiveModelContext(schemaContext);
+        dataTree = new InMemoryDataTreeFactory().create(logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+            ? DataTreeConfiguration.DEFAULT_CONFIGURATION : DataTreeConfiguration.DEFAULT_OPERATIONAL);
+        dataTree.setEffectiveModelContext(schemaContext);
     }
 
     /**
@@ -46,26 +45,26 @@ public final class NormalizedNodeAggregator {
     }
 
     private Optional<NormalizedNode> aggregate() throws DataValidationFailedException {
-        return combine().getRootNode();
-    }
-
-    private NormalizedNodeAggregator combine() throws DataValidationFailedException {
         final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+        boolean nodePresent = false;
 
         for (final Optional<NormalizedNode> node : nodes) {
             if (node.isPresent()) {
-                mod.merge(rootIdentifier, node.get());
+                mod.merge(rootIdentifier, node.orElseThrow());
+                nodePresent = true;
             }
         }
+
+        if (!nodePresent) {
+            return Optional.empty();
+        }
+
+
         mod.ready();
         dataTree.validate(mod);
         final DataTreeCandidate candidate = dataTree.prepare(mod);
         dataTree.commit(candidate);
 
-        return this;
-    }
-
-    private Optional<NormalizedNode> getRootNode() {
         return dataTree.takeSnapshot().readNode(rootIdentifier);
     }
 }
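The reworked aggregate() above folds the former combine()/getRootNode() pair into a single pass over the per-shard results. A condensed sketch of that flow, using only calls already visible in the hunk; the schema context and the read fragments are assumed to be supplied by the caller:

    import java.util.List;
    import java.util.Optional;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
    import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
    import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
    import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;

    final class AggregateSketch {
        static Optional<NormalizedNode> aggregate(final EffectiveModelContext schemaContext,
                final List<Optional<NormalizedNode>> fragments) throws DataValidationFailedException {
            // Throw-away in-memory tree used purely to merge the per-shard fragments.
            final var dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL);
            dataTree.setEffectiveModelContext(schemaContext);

            final var mod = dataTree.takeSnapshot().newModification();
            boolean nodePresent = false;
            for (var fragment : fragments) {
                if (fragment.isPresent()) {
                    mod.merge(YangInstanceIdentifier.of(), fragment.orElseThrow());
                    nodePresent = true;
                }
            }
            if (!nodePresent) {
                // Nothing was read from any shard: short-circuit instead of committing an empty modification.
                return Optional.empty();
            }

            mod.ready();
            dataTree.validate(mod);
            dataTree.commit(dataTree.prepare(mod));
            return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of());
        }

        private AggregateSketch() {
            // utility class
        }
    }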
index 06f3a0557ad47bd5e4c8f1e2d70c6af6734f5509..afa17aeac6079eda4c7b688d5c20153e73d8c60b 100644 (file)
@@ -19,10 +19,10 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -127,8 +127,8 @@ public abstract class PruningDataTreeModification extends ForwardingObject imple
     }
 
     @Override
-    public final EffectiveModelContext getEffectiveModelContext() {
-        return delegate.getEffectiveModelContext();
+    public final EffectiveModelContext modelContext() {
+        return delegate.modelContext();
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java
new file mode 100644 (file)
index 0000000..d755adc
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+
+/**
+ * Utility methods for dealing with datastore root {@link ContainerNode} with respect to module shards.
+ */
+public final class RootScatterGather {
+    @NonNullByDefault
+    public record ShardContainer<T>(T shard, ContainerNode container) {
+        public ShardContainer {
+            requireNonNull(shard);
+            requireNonNull(container);
+        }
+
+        @Override
+        public String toString() {
+            return MoreObjects.toStringHelper(this).add("shard", shard).toString();
+        }
+    }
+
+    private RootScatterGather() {
+        // Hidden on purpose
+    }
+
+    /**
+     * Check whether a {@link NormalizedNode} represents a root container and return it cast to {@link ContainerNode}.
+     *
+     * @param node a normalized node
+     * @return {@code node} cast to ContainerNode
+     * @throws NullPointerException if {@code node} is null
+     * @throws IllegalArgumentException if {@code node} is not a {@link ContainerNode}
+     */
+    public static @NonNull ContainerNode castRootNode(final NormalizedNode node) {
+        final var nonnull = requireNonNull(node);
+        checkArgument(nonnull instanceof ContainerNode, "Invalid root data %s", nonnull);
+        return (ContainerNode) nonnull;
+    }
+
+    /**
+     * Reconstruct root container from a set of constituents.
+     *
+     * @param actorUtils {@link ActorUtils} reference
+     * @param readFutures Constituent read futures
+     * @return A composite future
+     */
+    public static @NonNull FluentFuture<Optional<NormalizedNode>> gather(final ActorUtils actorUtils,
+            final Stream<FluentFuture<Optional<NormalizedNode>>> readFutures) {
+        return FluentFuture.from(Futures.transform(
+            Futures.allAsList(readFutures.collect(ImmutableList.toImmutableList())), input -> {
+                try {
+                    return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(), input,
+                        actorUtils.getSchemaContext(), actorUtils.getDatastoreContext().getLogicalStoreType());
+                } catch (DataValidationFailedException e) {
+                    throw new IllegalArgumentException("Failed to aggregate", e);
+                }
+            }, MoreExecutors.directExecutor()));
+    }
+
+    public static <T> @NonNull Stream<ShardContainer<T>> scatterAll(final ContainerNode rootNode,
+            final Function<PathArgument, T> childToShard, final Stream<T> allShards) {
+        final var builders = allShards
+            .collect(Collectors.toUnmodifiableMap(Function.identity(), unused -> ImmutableNodes.newContainerBuilder()));
+        for (var child : rootNode.body()) {
+            final var shard = childToShard.apply(child.name());
+            verifyNotNull(builders.get(shard), "Failed to find builder for %s", shard).addChild(child);
+        }
+        return streamContainers(rootNode.name(), builders);
+    }
+
+    /**
+     * Split root container into per-shard root containers.
+     *
+     * @param <T> Shard reference type
+     * @param rootNode Root container to be split up
+     * @param childToShard Mapping function from child {@link PathArgument} to shard reference
+     * @return Stream of {@link ShardContainer}s, one for each touched shard
+     */
+    public static <T> @NonNull Stream<ShardContainer<T>> scatterTouched(final ContainerNode rootNode,
+            final Function<PathArgument, T> childToShard) {
+        final var builders = new HashMap<T, ContainerNode.Builder>();
+        for (var child : rootNode.body()) {
+            builders.computeIfAbsent(childToShard.apply(child.name()), unused -> ImmutableNodes.newContainerBuilder())
+                .addChild(child);
+        }
+        return streamContainers(rootNode.name(), builders);
+    }
+
+    private static <T> @NonNull Stream<ShardContainer<T>> streamContainers(final NodeIdentifier rootId,
+            final Map<T, ContainerNode.Builder> builders) {
+        return builders.entrySet().stream()
+            .map(entry -> new ShardContainer<>(entry.getKey(), entry.getValue().withNodeIdentifier(rootId).build()));
+    }
+}
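As a usage illustration of the new helper (nothing below is part of the change itself; the shard-naming function and the root container are assumed to come from the caller):

    import java.util.function.Function;
    import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
    import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;

    final class ScatterSketch {
        // Split a root container into one per-shard container, touching only shards that own children.
        static void printScatter(final ContainerNode rootData, final Function<PathArgument, String> shardNameFor) {
            RootScatterGather.scatterTouched(rootData, shardNameFor).forEach(shardContainer ->
                System.out.println("shard " + shardContainer.shard() + " receives "
                    + shardContainer.container().body().size() + " children"));
        }

        private ScatterSketch() {
            // utility class
        }
    }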
index 1d64e10e8395796c5487c275da62dc8a7e5454d6..ffef55d4d901c3fd6041ecd71fb3be971e43fd09 100644 (file)
@@ -14,7 +14,6 @@ import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Maps;
 import com.google.common.primitives.UnsignedLong;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -262,14 +261,10 @@ public abstract class UnsignedLongBitmap implements Immutable {
         return isEmpty() ? "{}" : appendEntries(new StringBuilder().append('{')).append('}').toString();
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-        justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private static StringBuilder appendEntry(final StringBuilder sb, final long key, final boolean value) {
         return sb.append(Long.toUnsignedString(key)).append('=').append(value);
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-        justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private static void writeEntry(final @NonNull DataOutput out, final long key, final boolean value)
             throws IOException {
         // FIXME: This serialization format is what we inherited. We could do better by storing the boolean in
index 3ad7d532ad63c8c37e047cbc3b3a73bf16426a91..21307e532cee418189e3601354b22464973808bd 100644 (file)
@@ -1,13 +1,16 @@
 // vi: set smarttab et sw=4 tabstop=4:
 module distributed-datastore-provider {
-
     yang-version 1;
     namespace "urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider";
     prefix "distributed-datastore-provider";
 
     description
         "This module contains the base YANG definitions for
-        the distributed datastore provider implementation";
+         the distributed datastore provider implementation";
+
+    revision "2023-12-29" {
+        description "Remote use-tell-based-protocol and shard-snapshot-chunk-size leaves";
+    }
 
     revision "2014-06-12" {
         description
@@ -209,28 +212,13 @@ module distributed-datastore-provider {
                          cannot be found then the default raft behavior will be applied";
         }
 
-        leaf shard-snapshot-chunk-size {
-            status deprecated;
-            default 2048000;
-            type non-zero-uint32-type;
-            description "When sending a snapshot to a follower, this is the maximum size in bytes for
-                         a chunk of data.";
-        }
-
         leaf maximum-message-slice-size {
-            default 2048000;
+            default 491520;
             type non-zero-uint32-type;
             description "When fragmenting messages thru the akka remoting framework, this is the
                          maximum size in bytes for a message slice.";
         }
 
-        leaf use-tell-based-protocol {
-            default false;
-            type boolean;
-            description "Use a newer protocol between the frontend and backend. This feature is considered
-                         exprerimental at this point.";
-        }
-
         leaf file-backed-streaming-threshold-in-megabytes {
             default 128;
             type non-zero-uint32-type;
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java
deleted file mode 100644 (file)
index e77f645..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doThrow;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ExecutionException;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-@RunWith(MockitoJUnitRunner.StrictStubs.class)
-public class AbstractDOMBrokerWriteTransactionTest {
-    @Mock
-    private AbstractDOMTransactionFactory<?> abstractDOMTransactionFactory;
-    @Mock
-    private DOMStoreWriteTransaction domStoreWriteTransaction;
-
-    private class AbstractDOMBrokerWriteTransactionTestImpl
-            extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
-        AbstractDOMBrokerWriteTransactionTestImpl() {
-            super(new Object(), Collections.emptyMap(), abstractDOMTransactionFactory);
-        }
-
-        @Override
-        protected DOMStoreWriteTransaction createTransaction(final LogicalDatastoreType key) {
-            return null;
-        }
-
-        @Override
-        protected Collection<DOMStoreWriteTransaction> getSubtransactions() {
-            return Collections.singletonList(domStoreWriteTransaction);
-        }
-    }
-
-    @Test
-    public void readyRuntimeExceptionAndCancel() throws InterruptedException {
-        RuntimeException thrown = new RuntimeException();
-        doThrow(thrown).when(domStoreWriteTransaction).ready();
-        AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl =
-                new AbstractDOMBrokerWriteTransactionTestImpl();
-
-        FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
-        try {
-            submitFuture.get();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof TransactionCommitFailedException);
-            assertTrue(e.getCause().getCause() == thrown);
-            abstractDOMBrokerWriteTransactionTestImpl.cancel();
-        }
-    }
-
-    @Test
-    public void submitRuntimeExceptionAndCancel() throws InterruptedException {
-        RuntimeException thrown = new RuntimeException();
-        doThrow(thrown).when(abstractDOMTransactionFactory).commit(any(), any());
-        AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl
-                = new AbstractDOMBrokerWriteTransactionTestImpl();
-
-        FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
-        try {
-            submitFuture.get();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof TransactionCommitFailedException);
-            assertTrue(e.getCause().getCause() == thrown);
-            abstractDOMBrokerWriteTransactionTestImpl.cancel();
-        }
-    }
-}
index fa8b1ef99316c7a6ec7532bdb6cc211a246f6bd6..15fe8a417f066d95f0588a71d27d289446cd45f0 100644 (file)
@@ -8,10 +8,16 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+import akka.util.Timeout;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -29,10 +35,8 @@ import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransacti
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.Empty;
+import scala.concurrent.duration.FiniteDuration;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedDataStoreTest {
@@ -49,19 +53,20 @@ public class ClientBackedDataStoreTest {
 
     @Mock
     private DataStoreClient clientActor;
-
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private Timeout shardElectionTimeout;
     @Mock
     private ActorUtils actorUtils;
-
     @Mock
     private ClientLocalHistory clientLocalHistory;
-
     @Mock
     private ClientTransaction clientTransaction;
-
     @Mock
     private ClientSnapshot clientSnapshot;
 
+
     @Before
     public void setUp() {
         doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
@@ -75,41 +80,66 @@ public class ClientBackedDataStoreTest {
 
     @Test
     public void testCreateTransactionChain() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreTransactionChain txChain = clientBackedDataStore.createTransactionChain();
-            assertNotNull(txChain);
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.createTransactionChain());
             verify(clientActor, times(1)).createLocalHistory();
         }
     }
 
     @Test
     public void testNewReadOnlyTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreReadTransaction tx = clientBackedDataStore.newReadOnlyTransaction();
-            assertNotNull(tx);
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newReadOnlyTransaction());
             verify(clientActor, times(1)).createSnapshot();
         }
     }
 
     @Test
     public void testNewWriteOnlyTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreWriteTransaction tx = clientBackedDataStore.newWriteOnlyTransaction();
-            assertNotNull(tx);
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newWriteOnlyTransaction());
             verify(clientActor, times(1)).createTransaction();
         }
     }
 
     @Test
     public void testNewReadWriteTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreReadWriteTransaction tx = clientBackedDataStore.newReadWriteTransaction();
-            assertNotNull(tx);
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newReadWriteTransaction());
             verify(clientActor, times(1)).createTransaction();
         }
     }
+
+    @Test
+    public void testWaitTillReadyBlocking() {
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
+        doReturn(1).when(datastoreContext).getInitialSettleTimeoutMultiplier();
+        doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            final var sw = Stopwatch.createStarted();
+            clientBackedDataStore.waitTillReady();
+            final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+            assertTrue("Expected to be blocked for 50 millis", elapsedMillis >= 50);
+        }
+    }
+
+    @Test
+    public void testWaitTillReadyCountDown() {
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+
+            ForkJoinPool.commonPool().submit(() -> {
+                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+                clientBackedDataStore.readinessFuture().set(Empty.value());
+            });
+
+            final var sw = Stopwatch.createStarted();
+            clientBackedDataStore.waitTillReady();
+            final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+            assertTrue("Expected to be released in 500 millis", elapsedMillis < 5000);
+        }
+    }
 }
index bcaedfa188ef8e10799f565ef58265522cdfaf54..08316b83911c5bb763962009bfafe869d9083112 100644 (file)
@@ -8,12 +8,10 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
-import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
@@ -23,14 +21,14 @@ import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedReadTransactionTest extends ClientBackedTransactionTest<ClientBackedReadTransaction> {
     private ClientBackedReadTransaction object;
 
     @Mock
-    private NormalizedNode data;
+    private ContainerNode data;
     @Mock
     private ClientActorContext clientContext;
     @Mock
@@ -45,23 +43,19 @@ public class ClientBackedReadTransactionTest extends ClientBackedTransactionTest
     public void setUp() {
         doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
 
-        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.empty());
-        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.empty());
+        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
 
         object = new ClientBackedReadTransaction(delegate, null, null);
     }
 
     @Test
     public void testRead() throws Exception {
-        final ListenableFuture<Optional<NormalizedNode>> result = object().read(YangInstanceIdentifier.empty());
-        final Optional<NormalizedNode> resultData = result.get();
-        assertTrue(resultData.isPresent());
-        assertEquals(data, resultData.get());
+        assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
     }
 
     @Test
     public void testExists() throws Exception {
-        final ListenableFuture<Boolean> result = object().exists(YangInstanceIdentifier.empty());
-        assertEquals(Boolean.TRUE, result.get());
+        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
     }
 }
index a404030f0e734feb59c419ad4e0333afb8283ea8..11301cf3c7ea11495edd233e3c9d60a2c8a7ab91 100644 (file)
@@ -8,12 +8,10 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
-import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
@@ -22,7 +20,7 @@ import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedReadWriteTransactionTest
@@ -32,7 +30,7 @@ public class ClientBackedReadWriteTransactionTest
     @Mock
     private ClientTransaction delegate;
     @Mock
-    private NormalizedNode data;
+    private ContainerNode data;
 
     @Override
     ClientBackedReadWriteTransaction object() {
@@ -43,22 +41,19 @@ public class ClientBackedReadWriteTransactionTest
     public void setUp() {
         doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
 
-        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.empty());
-        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.empty());
+        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
 
         object = new ClientBackedReadWriteTransaction(delegate, null);
     }
 
     @Test
     public void testRead() throws Exception {
-        final FluentFuture<Optional<NormalizedNode>> result = object().read(YangInstanceIdentifier.empty());
-        final Optional<NormalizedNode> resultData = result.get();
-        assertTrue(resultData.isPresent());
-        assertEquals(data, resultData.get());
+        assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
     }
 
     @Test
     public void testExists() throws Exception {
-        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.empty()).get());
+        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
     }
 }
index 21ef8967aecaa2c3d512cbb48146eec01c7c3798..45430083510318c77dd5aff5e20225a8d2e947ba 100644 (file)
@@ -19,7 +19,7 @@ import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTest<ClientBackedWriteTransaction> {
@@ -28,9 +28,7 @@ public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTes
     @Mock
     private ClientTransaction delegate;
     @Mock
-    private NormalizedNode data;
-    @Mock
-    private YangInstanceIdentifier path;
+    private ContainerNode data;
     @Mock
     private DOMStoreThreePhaseCommitCohort readyCohort;
 
@@ -49,20 +47,20 @@ public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTes
 
     @Test
     public void testWrite() {
-        object().write(path, data);
-        verify(delegate).write(path, data);
+        object().write(YangInstanceIdentifier.of(), data);
+        verify(delegate).write(YangInstanceIdentifier.of(), data);
     }
 
     @Test
     public void testMerge() {
-        object().merge(path, data);
-        verify(delegate).merge(path, data);
+        object().merge(YangInstanceIdentifier.of(), data);
+        verify(delegate).merge(YangInstanceIdentifier.of(), data);
     }
 
     @Test
     public void testDelete() {
-        object().delete(path);
-        verify(delegate).delete(path);
+        object().delete(YangInstanceIdentifier.of());
+        verify(delegate).delete(YangInstanceIdentifier.of());
     }
 
     @Test
index 482045181a4b7a120f500cab1d2c605dd03129c4..630d582f0c29851d7b5dacd488101b47a459b58f 100644 (file)
@@ -24,7 +24,6 @@ import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediate
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
 import com.google.common.base.Throwables;
-import com.google.common.collect.ClassToInstanceMap;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
@@ -34,8 +33,6 @@ import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -49,23 +46,18 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.stubbing.Answer;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
@@ -74,7 +66,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * Unit tests for DOMConcurrentDataCommitCoordinator.
@@ -84,8 +76,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 public class ConcurrentDOMDataBrokerTest {
 
     private final DOMDataTreeWriteTransaction transaction = mock(DOMDataTreeWriteTransaction.class);
-    private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
-    private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
+    private final DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
     private final ThreadPoolExecutor futureExecutor =
             new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<>());
     private ConcurrentDOMDataBroker coordinator;
@@ -121,8 +112,7 @@ public class ConcurrentDOMDataBrokerTest {
             final SettableFuture<Boolean> future = SettableFuture.create();
             if (doAsync) {
                 new Thread(() -> {
-                    Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
-                            10, TimeUnit.SECONDS);
+                    Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue, 10, TimeUnit.SECONDS);
                     future.set(Boolean.TRUE);
                 }).start();
             } else {
@@ -132,16 +122,11 @@ public class ConcurrentDOMDataBrokerTest {
             return future;
         };
 
-        doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
+        doAnswer(asyncCanCommit).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).commit();
 
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).commit();
-
-        ListenableFuture<? extends CommitInfo> future =
-                coordinator.commit(transaction, Arrays.asList(mockCohort1, mockCohort2));
+        ListenableFuture<? extends CommitInfo> future = coordinator.commit(transaction, mockCohort);
 
         final CountDownLatch doneLatch = new CountDownLatch(1);
         final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
@@ -169,35 +154,22 @@ public class ConcurrentDOMDataBrokerTest {
 
         assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
 
-        InOrder inOrder = inOrder(mockCohort1, mockCohort2);
-        inOrder.verify(mockCohort1).canCommit();
-        inOrder.verify(mockCohort2).canCommit();
-        inOrder.verify(mockCohort1).preCommit();
-        inOrder.verify(mockCohort2).preCommit();
-        inOrder.verify(mockCohort1).commit();
-        inOrder.verify(mockCohort2).commit();
+        InOrder inOrder = inOrder(mockCohort);
+        inOrder.verify(mockCohort, times(1)).canCommit();
+        inOrder.verify(mockCohort, times(1)).preCommit();
+        inOrder.verify(mockCohort, times(1)).commit();
     }
 
     @Test
     public void testSubmitWithNegativeCanCommitResponse() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort3).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        ListenableFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
+        assertFailure(coordinator.commit(transaction, mockCohort), null, mockCohort);
     }
 
     private static void assertFailure(final ListenableFuture<?> future, final Exception expCause,
-            final DOMStoreThreePhaseCommitCohort... mockCohorts) throws Exception {
+            final DOMStoreThreePhaseCommitCohort mockCohort) throws Exception {
         try {
             future.get(5, TimeUnit.SECONDS);
             fail("Expected TransactionCommitFailedException");
@@ -206,11 +178,7 @@ public class ConcurrentDOMDataBrokerTest {
             if (expCause != null) {
                 assertSame("Expected cause", expCause.getClass(), tcf.getCause().getClass());
             }
-
-            InOrder inOrder = inOrder((Object[])mockCohorts);
-            for (DOMStoreThreePhaseCommitCohort c: mockCohorts) {
-                inOrder.verify(c).abort();
-            }
+            verify(mockCohort, times(1)).abort();
         } catch (TimeoutException e) {
             throw e;
         }
@@ -218,97 +186,42 @@ public class ConcurrentDOMDataBrokerTest {
 
     @Test
     public void testSubmitWithCanCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2));
+        final Exception cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, cause, mockCohort1, mockCohort2);
-    }
-
-    @Test
-    public void testSubmitWithCanCommitDataStoreUnavailableException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-        NoShardLeaderException rootCause = new NoShardLeaderException("mock");
-        DataStoreUnavailableException cause = new DataStoreUnavailableException(rootCause.getMessage(), rootCause);
-        doReturn(Futures.immediateFailedFuture(rootCause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-            transaction, Arrays.asList(mockCohort1, mockCohort2));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2);
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithPreCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
-                .when(mockCohort3).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+        doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+        final IllegalStateException cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).preCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).preCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
-                .when(mockCohort3).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+        doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+        final IllegalStateException cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).commit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
+
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithAbortException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error")))
-                .when(mockCohort1).abort();
+        final Exception canCommitCause = new IllegalStateException("canCommit error");
+        doReturn(Futures.immediateFailedFuture(canCommitCause)).when(mockCohort).canCommit();
+        final Exception abortCause = new IllegalStateException("abort error");
+        doReturn(Futures.immediateFailedFuture(abortCause)).when(mockCohort).abort();
 
-        IllegalStateException cause = new IllegalStateException("mock canCommit error");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2);
+        assertFailure(coordinator.commit(transaction, mockCohort), canCommitCause, mockCohort);
     }
 
     @Test
@@ -361,17 +274,12 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeReadWriteTransaction dataTxn = dataBroker.newReadWriteTransaction();
 
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             verify(configDomStore, never()).newReadWriteTransaction();
             verify(operationalDomStore, times(1)).newReadWriteTransaction();
-
-            dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-
-            verify(configDomStore, times(1)).newReadWriteTransaction();
-            verify(operationalDomStore, times(1)).newReadWriteTransaction();
         }
 
     }
@@ -390,16 +298,11 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeWriteTransaction dataTxn = dataBroker.newWriteOnlyTransaction();
 
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
 
             verify(configDomStore, never()).newWriteOnlyTransaction();
             verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
-
-            dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(), mock(NormalizedNode.class));
-
-            verify(configDomStore, times(1)).newWriteOnlyTransaction();
-            verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
         }
     }
 
@@ -417,16 +320,11 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeReadTransaction dataTxn = dataBroker.newReadOnlyTransaction();
 
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             verify(configDomStore, never()).newReadOnlyTransaction();
             verify(operationalDomStore, times(1)).newReadOnlyTransaction();
-
-            dataTxn.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty());
-
-            verify(configDomStore, times(1)).newReadOnlyTransaction();
-            verify(operationalDomStore, times(1)).newReadOnlyTransaction();
         }
     }
 
@@ -435,7 +333,6 @@ public class ConcurrentDOMDataBrokerTest {
         DOMStore configDomStore = mock(DOMStore.class);
         DOMStore operationalDomStore = mock(DOMStore.class);
         DOMStoreReadWriteTransaction mockStoreReadWriteTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
 
         doReturn(mockStoreReadWriteTransaction).when(operationalDomStore).newReadWriteTransaction();
         doReturn(mockCohort).when(mockStoreReadWriteTransaction).ready();
@@ -450,15 +347,15 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor) {
             @Override
             public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
-                    Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-                commitCohorts.addAll(cohorts);
+                    DOMStoreThreePhaseCommitCohort cohort) {
+                commitCohorts.add(cohort);
                 latch.countDown();
-                return super.commit(writeTx, cohorts);
+                return super.commit(writeTx, cohort);
             }
         }) {
             DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
 
-            domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+            domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             domDataReadWriteTransaction.commit();
 
@@ -468,56 +365,6 @@ public class ConcurrentDOMDataBrokerTest {
         }
     }
 
-    @Test
-    public void testSubmitWithOnlyTwoSubTransactions() throws InterruptedException {
-        DOMStore configDomStore = mock(DOMStore.class);
-        DOMStore operationalDomStore = mock(DOMStore.class);
-        DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreReadWriteTransaction configTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreThreePhaseCommitCohort mockCohortOperational = mock(DOMStoreThreePhaseCommitCohort.class);
-        DOMStoreThreePhaseCommitCohort mockCohortConfig = mock(DOMStoreThreePhaseCommitCohort.class);
-
-        doReturn(operationalTransaction).when(operationalDomStore).newReadWriteTransaction();
-        doReturn(configTransaction).when(configDomStore).newReadWriteTransaction();
-
-        doReturn(mockCohortOperational).when(operationalTransaction).ready();
-        doReturn(immediateFalseFluentFuture()).when(mockCohortOperational).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohortOperational).abort();
-
-        doReturn(mockCohortConfig).when(configTransaction).ready();
-        doReturn(immediateFalseFluentFuture()).when(mockCohortConfig).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohortConfig).abort();
-
-        final CountDownLatch latch = new CountDownLatch(1);
-        final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList<>();
-
-        try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
-                LogicalDatastoreType.OPERATIONAL, operationalDomStore, LogicalDatastoreType.CONFIGURATION,
-                configDomStore), futureExecutor) {
-            @Override
-            @SuppressWarnings("checkstyle:hiddenField")
-            public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
-                    Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-                commitCohorts.addAll(cohorts);
-                latch.countDown();
-                return super.commit(writeTx, cohorts);
-            }
-        }) {
-            DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
-
-            domDataReadWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(),
-                    mock(NormalizedNode.class));
-            domDataReadWriteTransaction.merge(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.empty(),
-                    mock(NormalizedNode.class));
-
-            domDataReadWriteTransaction.commit();
-
-            assertTrue(latch.await(10, TimeUnit.SECONDS));
-
-            assertTrue(commitCohorts.size() == 2);
-        }
-    }
-
     @Test
     public void testCreateTransactionChain() {
         DOMStore domStore = mock(DOMStore.class);
@@ -525,7 +372,7 @@ public class ConcurrentDOMDataBrokerTest {
                 LogicalDatastoreType.OPERATIONAL, domStore, LogicalDatastoreType.CONFIGURATION, domStore),
                 futureExecutor)) {
 
-            dataBroker.createTransactionChain(mock(DOMTransactionChainListener.class));
+            dataBroker.createTransactionChain();
 
             verify(domStore, times(2)).createTransactionChain();
         }
@@ -545,15 +392,14 @@ public class ConcurrentDOMDataBrokerTest {
             doReturn(mockChain).when(domStore).createTransactionChain();
             doReturn(operationalTransaction).when(mockChain).newWriteOnlyTransaction();
 
-            DOMTransactionChain transactionChain = dataBroker.createTransactionChain(
-                    mock(DOMTransactionChainListener.class));
+            DOMTransactionChain transactionChain = dataBroker.createTransactionChain();
 
             DOMDataTreeWriteTransaction domDataWriteTransaction = transactionChain.newWriteOnlyTransaction();
 
             verify(mockChain, never()).newWriteOnlyTransaction();
 
-            domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty(),
-                    mock(NormalizedNode.class));
+            domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(),
+                    mock(ContainerNode.class));
         }
     }
 
@@ -580,26 +426,21 @@ public class ConcurrentDOMDataBrokerTest {
 
     @Test
     public void testExtensions() {
-        DistributedDataStore mockConfigStore = mock(DistributedDataStore.class);
-        DistributedDataStore mockOperStore = mock(DistributedDataStore.class);
-        try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
+        final var mockConfigStore = mock(ClientBackedDataStore.class);
+        final var mockOperStore = mock(ClientBackedDataStore.class);
+        try (var dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
                 LogicalDatastoreType.OPERATIONAL, mockOperStore,
                 LogicalDatastoreType.CONFIGURATION, mockConfigStore), futureExecutor)) {
+            assertNotNull(dataBroker.extension(DataTreeChangeExtension.class));
 
-            ClassToInstanceMap<DOMDataBrokerExtension> supportedExtensions = dataBroker.getExtensions();
-            assertNotNull(supportedExtensions.getInstance(DOMDataTreeChangeService.class));
-
-            DOMDataTreeCommitCohortRegistry cohortRegistry = supportedExtensions.getInstance(
-                DOMDataTreeCommitCohortRegistry.class);
+            final var cohortRegistry = dataBroker.extension(CommitCohortExtension.class);
             assertNotNull(cohortRegistry);
 
-            DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
-            DOMDataTreeIdentifier path = new DOMDataTreeIdentifier(
-                    org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION,
-                    YangInstanceIdentifier.empty());
-            cohortRegistry.registerCommitCohort(path, mockCohort);
+            final var cohort = mock(DOMDataTreeCommitCohort.class);
+            final var path = DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.of());
+            cohortRegistry.registerCommitCohort(path, cohort);
 
-            verify(mockConfigStore).registerCommitCohort(path, mockCohort);
+            verify(mockConfigStore).registerCommitCohort(path, cohort);
         }
     }
 }
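
The hunks above replace the two mocked cohorts with a single DOMStoreThreePhaseCommitCohort stubbed so that every phase completes immediately. A minimal sketch of that happy-path stubbing, reusing only names already imported by the test; the class and method names here are illustrative, not part of the patch:

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateNullFluentFuture;
import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;

import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;

final class HappyPathCohortSketch {
    static DOMStoreThreePhaseCommitCohort newCohort() {
        final var cohort = mock(DOMStoreThreePhaseCommitCohort.class);
        // All three phases succeed immediately, so a commit driven through this cohort
        // can run to completion without blocking on the future executor.
        doReturn(immediateTrueFluentFuture()).when(cohort).canCommit();
        doReturn(immediateNullFluentFuture()).when(cohort).preCommit();
        doReturn(immediateNullFluentFuture()).when(cohort).commit();
        return cohort;
    }
}
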
index 7cd191e828e207f1832c921c93e03e16dcbce75d..37f38810ce5a8a4a344720a6a7dc2e867713fe8c 100644 (file)
@@ -10,8 +10,9 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
@@ -44,17 +45,19 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Promise;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<AbstractProxyTransaction>> {
     private static final String PERSISTENCE_ID = "per-1";
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
 
     @Mock
     private DataTree dataTree;
@@ -87,7 +90,7 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         final InternalCommand<ShardBackendInfo> command = clientContextProbe.expectMsgClass(InternalCommand.class);
         command.execute(client);
         //data tree mock
-        when(dataTree.takeSnapshot()).thenReturn(dataTreeSnapshot);
+        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
 
         handle = createHandle(parent);
     }
@@ -144,8 +147,7 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
 
     @Test
     public void testEnsureProxy() {
-        final AbstractProxyTransaction expected = mock(AbstractProxyTransaction.class);
-        final AbstractProxyTransaction proxy = handle.ensureProxy(PATH);
+        final var proxy = handle.ensureProxy(PATH);
         assertEquals(0, proxy.getIdentifier().getTransactionId());
     }
 
@@ -201,8 +203,13 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(any())).thenReturn(promise.future());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+
+        final EffectiveModelContext context = mock(EffectiveModelContext.class);
+        lenient().doCallRealMethod().when(context).getQName();
+        lenient().doReturn(context).when(mock).getSchemaContext();
+        lenient().doReturn(DatastoreContext.newBuilder().build()).when(mock).getDatastoreContext();
+
         return mock;
     }
-
 }
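
Under MockitoJUnitRunner.StrictStubs the test now stubs with doReturn(...).when(...) and marks stubs that may go unused as lenient(). A small self-contained sketch of the pattern; the Backend interface and its methods are invented here purely for illustration:

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;

import java.util.List;

final class StrictStubbingSketch {
    interface Backend {                 // hypothetical collaborator, for illustration only
        List<String> shardNames();
        int batchSize();
    }

    static Backend stubbedBackend() {
        final Backend backend = mock(Backend.class);
        doReturn(List.of("default")).when(backend).shardNames();  // stub the test always exercises
        lenient().doReturn(1000).when(backend).batchSize();       // optional stub; lenient() keeps
                                                                  // StrictStubs from failing if unused
        return backend;
    }
}
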
index 8b376f2dd0172cc2a45590c80eae012cc8d159b2..7f934622ab215ac82ffe762661b2a67828ca6f21 100644 (file)
@@ -28,10 +28,11 @@ import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import scala.concurrent.Promise;
 import scala.concurrent.impl.Promise.DefaultPromise;
 
@@ -42,6 +43,8 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
 
     @Mock
     private DataTree tree;
+    @Mock
+    private DatastoreContext datastoreContext;
 
     protected abstract T object();
 
@@ -112,7 +115,7 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
 
     @Test
     public void testResolveShardForPath() {
-        final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.empty());
+        final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.of());
         assertNotNull(shardForPath);
         assertEquals(0L, (long) shardForPath);
     }
@@ -176,13 +179,16 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
         assertNull(reconnectCohort);
     }
 
-    protected static ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
+    protected final ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
         final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
         doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(mock).getDatastoreContext();
+
         return mock;
     }
 }
\ No newline at end of file
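
Across these files YangInstanceIdentifier.empty() gives way to YangInstanceIdentifier.of(), which covers both the root path and single-step paths. A brief sketch using the same QName that appears in the test constants (the class name is illustrative):

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

final class RootPathSketch {
    static YangInstanceIdentifier root() {
        // A no-argument of() yields the empty (root) identifier, replacing empty().
        return YangInstanceIdentifier.of();
    }

    static YangInstanceIdentifier singleStep() {
        // of(QName) builds a one-step path directly, as used for PATH elsewhere in these tests.
        return YangInstanceIdentifier.of(QName.create("ns-1", "node-1"));
    }
}
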
index 6528c6ba7c91df095d0962797586e5ceb6566b5e..9254802810b38f3c4cdf2b86f1e6690377bd6eb0 100644 (file)
@@ -7,9 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 
 import akka.actor.ActorRef;
@@ -18,10 +21,9 @@ import akka.actor.ActorSystem;
 import akka.actor.Status;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import java.util.Collections;
+import java.util.List;
 import java.util.Optional;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
@@ -30,12 +32,13 @@ import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.InternalCommand;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import scala.concurrent.Promise;
 
 public abstract class AbstractDataStoreClientBehaviorTest {
@@ -48,16 +51,17 @@ public abstract class AbstractDataStoreClientBehaviorTest {
     private TestProbe clientActorProbe;
     private TestProbe actorContextProbe;
     private AbstractDataStoreClientBehavior behavior;
+    private ActorUtils util;
 
     @Before
     public void setUp() {
         system = ActorSystem.apply();
         clientActorProbe = new TestProbe(system, "client");
         actorContextProbe = new TestProbe(system, "actor-context");
-        final ActorUtils context = createActorContextMock(system, actorContextProbe.ref());
+        util = createActorContextMock(system, actorContextProbe.ref());
         clientContext =
                 AccessClientUtil.createClientActorContext(system, clientActorProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        behavior = createBehavior(clientContext, context);
+        behavior = createBehavior(clientContext, util);
     }
 
     @SuppressWarnings("checkstyle:hiddenField")
@@ -71,7 +75,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
 
     @Test
     public void testResolveShardForPath() {
-        Assert.assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.empty()).longValue());
+        assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.of()).longValue());
     }
 
     @Test
@@ -85,32 +89,32 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final GetClientRequest request = new GetClientRequest(probe.ref());
         final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand(request);
         final Status.Success success = probe.expectMsgClass(Status.Success.class);
-        Assert.assertEquals(behavior, success.status());
-        Assert.assertSame(behavior, nextBehavior);
+        assertEquals(behavior, success.status());
+        assertSame(behavior, nextBehavior);
     }
 
     @Test
     public void testOnCommandUnhandled() {
         final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand("unhandled");
-        Assert.assertSame(behavior, nextBehavior);
+        assertSame(behavior, nextBehavior);
     }
 
     @Test
     public void testCreateLocalHistory() {
         final ClientLocalHistory history = behavior.createLocalHistory();
-        Assert.assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
+        assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
     }
 
     @Test
     public void testCreateTransaction() {
         final ClientTransaction transaction = behavior.createTransaction();
-        Assert.assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
+        assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
     }
 
     @Test
     public void testCreateSnapshot() {
         final ClientSnapshot snapshot = behavior.createSnapshot();
-        Assert.assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
+        assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
     }
 
     @Test
@@ -119,48 +123,49 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final InternalCommand<ShardBackendInfo> internalCommand =
                 clientActorProbe.expectMsgClass(InternalCommand.class);
         internalCommand.execute(behavior);
-        try {
-            behavior.createLocalHistory();
-            Assert.fail("Behavior is closed and shouldn't allow to create new history.");
-        } catch (final IllegalStateException e) {
-            //ok
-        }
+
+        assertThrows(IllegalStateException.class, () -> behavior.createLocalHistory());
     }
 
     @Test
     public void testGetIdentifier() {
-        Assert.assertEquals(CLIENT_ID, behavior.getIdentifier());
+        assertEquals(CLIENT_ID, behavior.getIdentifier());
     }
 
     @Test
     public void testGetConnection() {
+        final var datastoreContext = mock(DatastoreContext.class);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(util).getDatastoreContext();
+
         //set up data tree mock
         final CursorAwareDataTreeModification modification = mock(CursorAwareDataTreeModification.class);
-        when(modification.readNode(YangInstanceIdentifier.empty())).thenReturn(Optional.empty());
+        doReturn(Optional.empty()).when(modification).readNode(YangInstanceIdentifier.of());
         final DataTreeSnapshot snapshot = mock(DataTreeSnapshot.class);
-        when(snapshot.newModification()).thenReturn(modification);
+        doReturn(modification).when(snapshot).newModification();
         final DataTree dataTree = mock(DataTree.class);
-        when(dataTree.takeSnapshot()).thenReturn(snapshot);
+        doReturn(snapshot).when(dataTree).takeSnapshot();
 
         final TestProbe backendProbe = new TestProbe(system, "backend");
         final long shard = 0L;
-        behavior.createTransaction().read(YangInstanceIdentifier.empty());
+
+        behavior.createTransaction().read(YangInstanceIdentifier.of());
         final AbstractClientConnection<ShardBackendInfo> connection = behavior.getConnection(shard);
         //check cached connection for same shard
-        Assert.assertSame(connection, behavior.getConnection(shard));
+        assertSame(connection, behavior.getConnection(shard));
 
         final ConnectClientRequest connectClientRequest = actorContextProbe.expectMsgClass(ConnectClientRequest.class);
-        Assert.assertEquals(CLIENT_ID, connectClientRequest.getTarget());
+        assertEquals(CLIENT_ID, connectClientRequest.getTarget());
         final long sequence = 0L;
-        Assert.assertEquals(sequence, connectClientRequest.getSequence());
-        actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(),
-                Collections.emptyList(), dataTree, 3));
-        Assert.assertEquals(clientActorProbe.ref(), connection.localActor());
+        assertEquals(sequence, connectClientRequest.getSequence());
+        actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(), List.of(), dataTree,
+                3));
+        assertEquals(clientActorProbe.ref(), connection.localActor());
         //capture and execute command passed to client context
         final InternalCommand<ShardBackendInfo> command = clientActorProbe.expectMsgClass(InternalCommand.class);
         command.execute(behavior);
         //check whether the command was replayed
-        verify(modification).readNode(YangInstanceIdentifier.empty());
+        verify(modification).readNode(YangInstanceIdentifier.of());
     }
 
     private static ActorUtils createActorContextMock(final ActorSystem system, final ActorRef actor) {
@@ -169,8 +174,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(SHARD)).thenReturn(promise.future());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(SHARD);
         return mock;
     }
-
 }
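
The closed-behavior check above moves from a try/fail/catch block to JUnit's assertThrows. A minimal sketch of the same idiom against a hypothetical Runnable argument:

import static org.junit.Assert.assertThrows;

final class AssertThrowsSketch {
    static void expectIllegalState(final Runnable callOnClosedClient) {
        // Equivalent to the removed try { call(); fail(...); } catch (IllegalStateException e) { } block.
        assertThrows(IllegalStateException.class, callOnClosedClient::run);
    }
}
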
index 167c5bc348467ce6d09ac347aa349ce2aa5e2ac6..b28c84b09e64c75a710f961053fbcbcf693c9dfe 100644 (file)
@@ -14,6 +14,7 @@ import static org.hamcrest.core.Is.isA;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -60,12 +61,16 @@ import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifie
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransaction> {
@@ -82,11 +87,11 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     protected static final YangInstanceIdentifier PATH_3 = YangInstanceIdentifier.builder()
             .node(QName.create("ns-1", "node-3"))
             .build();
-    protected static final ContainerNode DATA_1 = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
+    protected static final ContainerNode DATA_1 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
             .build();
-    protected static final ContainerNode DATA_2 = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
+    protected static final ContainerNode DATA_2 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
             .build();
     protected static final String PERSISTENCE_ID = "per-1";
 
@@ -94,6 +99,11 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     private DataTreeSnapshot snapshot;
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
     private TestProbe backendProbe;
     private TestProbe clientContextProbe;
@@ -108,15 +118,22 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         backendProbe = new TestProbe(system, "backend");
         context = AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID,
                 PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
+
         final ProxyHistory parent = ProxyHistory.createClient(history, connection, HISTORY_ID);
         transaction = createTransaction(parent, TestUtils.TRANSACTION_ID, snapshot);
         tester = new TransactionTester<>(transaction, connection, backendProbe);
     }
 
+    protected final void mockForRemote() {
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+    }
+
     @SuppressWarnings("checkstyle:hiddenField")
     protected abstract T createTransaction(ProxyHistory parent, TransactionIdentifier id, DataTreeSnapshot snapshot);
 
@@ -198,8 +215,7 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         final ModifyTransactionRequest transformed = successor.expectTransactionRequest(ModifyTransactionRequest.class);
         assertNotNull(transformed);
         assertEquals(successful1.getSequence(), transformed.getSequence());
-        assertTrue(transformed.getPersistenceProtocol().isPresent());
-        assertEquals(PersistenceProtocol.ABORT, transformed.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), transformed.getPersistenceProtocol());
 
         ReadTransactionRequest tmpRead = successor.expectTransactionRequest(ReadTransactionRequest.class);
         assertNotNull(tmpRead);
@@ -232,11 +248,11 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     }
 
     @SuppressWarnings("checkstyle:hiddenField")
-    protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Void>> consumer,
+    protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Empty>> consumer,
             final Class<R> expectedRequest,
             final BiFunction<TransactionIdentifier, Long, TransactionSuccess<?>> replySupplier) {
         final TransactionTester<T> tester = getTester();
-        final VotingFuture<Void> future = mock(VotingFuture.class);
+        final VotingFuture<Empty> future = mock(VotingFuture.class);
         transaction.seal();
         consumer.accept(future);
         final TransactionRequest<?> req = tester.expectTransactionRequest(expectedRequest);
@@ -303,7 +319,7 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext2");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
@@ -321,13 +337,18 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         final TestProbe clientContextProbe = new TestProbe(system, "remoteClientContext");
         final TestProbe backendProbe = new TestProbe(system, "remoteBackend");
         final AbstractClientHistory history = mock(AbstractClientHistory.class);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 5);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
         final ProxyHistory proxyHistory = ProxyHistory.createClient(history, connection, HISTORY_ID);
+
         final RemoteProxyTransaction transaction =
                 new RemoteProxyTransaction(proxyHistory, TRANSACTION_ID, false, false, false);
         return new TransactionTester<>(transaction, connection, backendProbe);
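
The DATA_1/DATA_2 constants above now use ImmutableNodes.newContainerBuilder() from yang-data-spi instead of the removed Builders.containerBuilder(). A compact sketch of the new builder call (class and method names are illustrative):

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

final class ContainerBuilderSketch {
    static ContainerNode emptyContainer() {
        // Mirrors DATA_1/DATA_2 above: an empty container identified by its QName.
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(QName.create("ns-1", "node-1")))
            .build();
    }
}
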
index 9e8c33254418ad04d9799980a833ad83ac1a4297..cc50a9ba580002eb72faf6fdeb950d2ae161c124 100644 (file)
@@ -9,26 +9,23 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
 
-import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 public class ClientSnapshotTest extends AbstractClientHandleTest<ClientSnapshot> {
-
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.empty();
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
 
     @Before
     @Override
     public void setUp() throws Exception {
         super.setUp();
-        when(getDataTreeSnapshot().readNode(PATH)).thenReturn(Optional.empty());
+        doReturn(Optional.empty()).when(getDataTreeSnapshot()).readNode(PATH);
     }
 
     @Override
@@ -43,15 +40,15 @@ public class ClientSnapshotTest extends AbstractClientHandleTest<ClientSnapshot>
 
     @Test
     public void testExists() throws Exception {
-        final ListenableFuture<Boolean> exists = getHandle().exists(PATH);
+        final var exists = getHandle().exists(PATH);
         verify(getDataTreeSnapshot()).readNode(PATH);
         assertEquals(Boolean.FALSE, getWithTimeout(exists));
     }
 
     @Test
     public void testRead() throws Exception {
-        final ListenableFuture<Optional<NormalizedNode>> exists = getHandle().read(PATH);
+        final var read = getHandle().read(PATH);
         verify(getDataTreeSnapshot()).readNode(PATH);
-        assertFalse(getWithTimeout(exists).isPresent());
+        assertFalse(getWithTimeout(read).isPresent());
     }
 }
index 8b58e1587790384f10ae0635d740c1f48a5f2470..e54b275c9516fd858aaeb6a0461f0ac1ed2e1c70 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
@@ -48,17 +49,26 @@ import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitR
 import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientTransactionCommitCohortTest {
-
     private static final String PERSISTENCE_ID = "per-1";
     private static final int TRANSACTIONS = 3;
 
+    private final List<TransactionTester<RemoteProxyTransaction>> transactions = new ArrayList<>();
+
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
-    private List<TransactionTester<RemoteProxyTransaction>> transactions;
     private ClientTransactionCommitCohort cohort;
 
     @Before
@@ -67,7 +77,10 @@ public class ClientTransactionCommitCohortTest {
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        transactions = new ArrayList<>();
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         for (int i = 0; i < TRANSACTIONS; i++) {
             transactions.add(createTransactionTester(new TestProbe(system, "backend" + i), context, history));
         }
@@ -97,7 +110,7 @@ public class ClientTransactionCommitCohortTest {
     @Test
     public void testPreCommit() throws Exception {
         testOpSuccess(ClientTransactionCommitCohort::preCommit, this::expectPreCommit, this::replyPreCommitSuccess,
-                null);
+            Empty.value());
     }
 
     @Test
@@ -107,7 +120,8 @@ public class ClientTransactionCommitCohortTest {
 
     @Test
     public void testCommit() throws Exception {
-        testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess, null);
+        testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess,
+            CommitInfo.empty());
     }
 
     @Test
@@ -117,7 +131,7 @@ public class ClientTransactionCommitCohortTest {
 
     @Test
     public void testAbort() throws Exception {
-        testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, null);
+        testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, Empty.value());
     }
 
     @Test
@@ -169,7 +183,7 @@ public class ClientTransactionCommitCohortTest {
     private static TransactionTester<RemoteProxyTransaction> createTransactionTester(final TestProbe backendProbe,
                                                              final ClientActorContext context,
                                                              final AbstractClientHistory history) {
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
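
The cohort phase results asserted above are no longer null: preCommit() and abort() now complete with Empty.value(), while commit() completes with CommitInfo.empty(). A tiny sketch collecting those expected values (constant names are illustrative):

import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.yangtools.yang.common.Empty;

final class PhaseResultSketch {
    // Expected results of the three-phase protocol after the migration away from null:
    static final Boolean CAN_COMMIT_RESULT = Boolean.TRUE;      // canCommit() still yields a Boolean
    static final Empty PRE_COMMIT_RESULT = Empty.value();       // preCommit() and abort() yield Empty
    static final CommitInfo COMMIT_RESULT = CommitInfo.empty(); // commit() yields a CommitInfo
}
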
index 69494f30fffc6104fdca36975ede82ca23dc4ed0..74b3b6252d356ef371d4bdbff750daf471634475 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
@@ -23,22 +22,22 @@ import org.junit.Test;
 import org.mockito.Mock;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
+import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
 
 public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransaction> {
-
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.builder()
-            .node(QName.create("ns-1", "node-1"))
-            .build();
-    private static final ContainerNode DATA = Builders.containerBuilder()
-            .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
-            .build();
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of(QName.create("ns-1", "node-1"));
+    private static final ContainerNode DATA = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
+        .build();
 
     @Mock
     private CursorAwareDataTreeModification modification;
@@ -72,9 +71,7 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
     public void testRead() throws Exception {
         final FluentFuture<Optional<NormalizedNode>> resultFuture = getHandle().read(PATH);
         verify(modification).readNode(PATH);
-        final Optional<NormalizedNode> result = getWithTimeout(resultFuture);
-        assertTrue(result.isPresent());
-        assertEquals(DATA, result.get());
+        assertEquals(Optional.of(DATA), getWithTimeout(resultFuture));
     }
 
     @Test
@@ -99,8 +96,8 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
     public void testReadyEmpty() throws Exception {
         final DOMStoreThreePhaseCommitCohort cohort = getHandle().ready();
         assertFutureEquals(Boolean.TRUE, cohort.canCommit());
-        assertFutureEquals(null, cohort.preCommit());
-        assertFutureEquals(null, cohort.commit());
+        assertFutureEquals(Empty.value(), cohort.preCommit());
+        assertFutureEquals(CommitInfo.empty(), cohort.commit());
     }
 
     @Test
@@ -113,8 +110,8 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
                 backendRespondToRequest(CommitLocalTransactionRequest.class, response);
         assertEquals(modification, request.getModification());
         assertFutureEquals(Boolean.TRUE, actual);
-        assertFutureEquals(null, cohort.preCommit());
-        assertFutureEquals(null, cohort.commit());
+        assertFutureEquals(Empty.value(), cohort.preCommit());
+        assertFutureEquals(CommitInfo.empty(), cohort.commit());
     }
 
     @Test
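The read assertions above fold the isPresent()/get() pair into one equality check against Optional.of(...). A self-contained JUnit 4 illustration of the same idiom, with purely illustrative names:

    import static org.junit.Assert.assertEquals;

    import java.util.Optional;
    import org.junit.Test;

    public class OptionalAssertSketch {
        @Test
        public void readReturnsExpectedValue() {
            final Optional<String> result = Optional.of("DATA");

            // One assertion covers presence and value, and on failure the message shows the
            // whole actual Optional instead of a bare "expected true".
            assertEquals(Optional.of("DATA"), result);

            // Absence is asserted the same way.
            assertEquals(Optional.empty(), Optional.ofNullable(null));
        }
    }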
index 1dd86b0028910beee395f43e3733647ad7294f4c..32f062ec6ba623fc6f9dbf9ec490e38cf20790fe 100644 (file)
@@ -8,8 +8,8 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
@@ -36,14 +36,20 @@ import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequ
 import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
 import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class DirectTransactionCommitCohortTest {
-
     private static final String PERSISTENCE_ID = "per-1";
 
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
     private TransactionTester<?> transaction;
     private DirectTransactionCommitCohort cohort;
@@ -54,6 +60,10 @@ public class DirectTransactionCommitCohortTest {
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         transaction = createTransactionTester(new TestProbe(system, "backend"), context, history);
         final AbstractProxyTransaction proxy = transaction.getTransaction();
         proxy.seal();
@@ -69,8 +79,7 @@ public class DirectTransactionCommitCohortTest {
     public void testCanCommit() throws Exception {
         final ListenableFuture<Boolean> canCommit = cohort.canCommit();
         final ModifyTransactionRequest request = transaction.expectTransactionRequest(ModifyTransactionRequest.class);
-        assertTrue(request.getPersistenceProtocol().isPresent());
-        assertEquals(PersistenceProtocol.SIMPLE, request.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.SIMPLE), request.getPersistenceProtocol());
         final RequestSuccess<?, ?> success = new TransactionCommitSuccess(transaction.getTransaction().getIdentifier(),
                 transaction.getLastReceivedMessage().getSequence());
         transaction.replySuccess(success);
@@ -79,28 +88,28 @@ public class DirectTransactionCommitCohortTest {
 
     @Test
     public void testPreCommit() throws Exception {
-        final ListenableFuture<Void> preCommit = cohort.preCommit();
-        assertNull(getWithTimeout(preCommit));
+        final ListenableFuture<?> preCommit = cohort.preCommit();
+        assertNotNull(getWithTimeout(preCommit));
     }
 
     @Test
     public void testAbort() throws Exception {
-        final ListenableFuture<Void> abort = cohort.abort();
+        final ListenableFuture<?> abort = cohort.abort();
         verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
-        assertNull(getWithTimeout(abort));
+        assertNotNull(getWithTimeout(abort));
     }
 
     @Test
     public void testCommit() throws Exception {
-        final ListenableFuture<Void> commit = cohort.commit();
+        final ListenableFuture<?> commit = cohort.commit();
         verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
-        assertNull(getWithTimeout(commit));
+        assertNotNull(getWithTimeout(commit));
     }
 
     private static TransactionTester<?> createTransactionTester(final TestProbe backendProbe,
                                                                 final ClientActorContext context,
                                                                 final AbstractClientHistory history) {
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
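These tests run under MockitoJUnitRunner.StrictStubs and wire history.actorUtils() through ActorUtils and DatastoreContext with doReturn(...).when(...). A generic sketch of that stubbing style follows; the interfaces are stand-ins rather than the controller types, and the only point being made is the direction of the stubbing call:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    public final class DoReturnStubbingSketch {
        public interface Context { int batchSize(); }
        public interface Utils { Context context(); }
        public interface History { Utils utils(); }

        public static void main(final String[] args) {
            final Context context = mock(Context.class);
            final Utils utils = mock(Utils.class);
            final History history = mock(History.class);

            // doReturn(...).when(mock).call() records the stub without dispatching the stubbed
            // method while setting it up, unlike when(mock.call()).thenReturn(...). That matters
            // for spies and for methods with side effects.
            doReturn(1000).when(context).batchSize();
            doReturn(context).when(utils).context();
            doReturn(utils).when(history).utils();

            System.out.println(history.utils().context().batchSize()); // prints 1000
        }
    }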
index a546955ab82ae306abfa9126e9a49edfa6ce2597..312edb335c9a01ad195af5fdb3ac984a6e252d50 100644 (file)
@@ -8,10 +8,12 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
+import java.util.Set;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@@ -20,11 +22,16 @@ public class DistributedDataStoreClientBehaviorTest extends AbstractDataStoreCli
     @Override
     protected AbstractDataStoreClientBehavior createBehavior(final ClientActorContext clientContext,
                                                              final ActorUtils context) {
-        final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
         final ShardStrategy strategy = mock(ShardStrategy.class);
-        when(strategy.findShard(any())).thenReturn(SHARD);
-        when(factory.getStrategy(any())).thenReturn(strategy);
-        when(context.getShardStrategyFactory()).thenReturn(factory);
+        doReturn(SHARD).when(strategy).findShard(any());
+        final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
+        doReturn(strategy).when(factory).getStrategy(any());
+        doReturn(factory).when(context).getShardStrategyFactory();
+
+        final Configuration config = mock(Configuration.class);
+        doReturn(Set.of(SHARD)).when(config).getAllShardNames();
+        doReturn(config).when(context).getConfiguration();
+
         return new DistributedDataStoreClientBehavior(clientContext, context);
     }
 }
index 60408ae149707aa79132da791edadac514a37863..fbfdc0a924cdec64f3d7dc89c2b711d5b2a7a7b5 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
@@ -40,22 +40,21 @@ public class EmptyTransactionCommitCohortTest {
 
     @Test
     public void testPreCommit() throws Exception {
-        final ListenableFuture<Void> preCommit = cohort.preCommit();
-        assertNull(getWithTimeout(preCommit));
+        assertNotNull(getWithTimeout(cohort.preCommit()));
     }
 
     @Test
     public void testAbort() throws Exception {
-        final ListenableFuture<Void> abort = cohort.abort();
+        final ListenableFuture<?> abort = cohort.abort();
         verify(history).onTransactionComplete(TRANSACTION_ID);
-        assertNull(getWithTimeout(abort));
+        assertNotNull(getWithTimeout(abort));
     }
 
     @Test
     public void testCommit() throws Exception {
-        final ListenableFuture<Void> commit = cohort.commit();
+        final ListenableFuture<?> commit = cohort.commit();
         verify(history).onTransactionComplete(TRANSACTION_ID);
-        assertNull(getWithTimeout(commit));
+        assertNotNull(getWithTimeout(commit));
     }
 
 }
\ No newline at end of file
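With the cohort results no longer null-typed, the local variables in these tests widen from ListenableFuture<Void> to ListenableFuture<?>. The small sketch below, using only Guava and an arbitrary payload, shows that the wildcard still allows waiting on completion when the result type is irrelevant:

    import static org.junit.Assert.assertNotNull;

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    public final class WildcardFutureSketch {
        public static void main(final String[] args) throws Exception {
            // The producer may complete with Empty, CommitInfo or anything else non-null;
            // this caller only cares that it completes.
            final ListenableFuture<?> done = Futures.immediateFuture("some-result");

            // get() is still available through the wildcard, typed as Object.
            assertNotNull(done.get());
        }
    }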
index 2543ca8247fc8886c6fd2a5b61853dc831a5439f..104981c1fd6839899e01f6aea578e29f2d6635a1 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -17,7 +19,6 @@ import akka.testkit.TestProbe;
 import com.google.common.base.Ticker;
 import java.util.Optional;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
@@ -35,8 +36,8 @@ import org.opendaylight.controller.cluster.access.commands.ReadTransactionSucces
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.Response;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
 
 public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         extends AbstractProxyTransactionTest<T> {
@@ -64,7 +65,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
     @SuppressWarnings("unchecked")
     private void setupExecuteInActor() {
         doAnswer(inv -> {
-            inv.<InternalCommand<?>>getArgument(0).execute(mock(ClientActorBehavior.class));
+            inv.getArgument(0, InternalCommand.class).execute(mock(ClientActorBehavior.class));
             return null;
         }).when(context).executeInActor(any(InternalCommand.class));
     }
@@ -81,10 +82,9 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
         verify(callback).accept(captor.capture());
         final Response<?, ?> value = captor.getValue();
-        Assert.assertTrue(value instanceof ReadTransactionSuccess);
+        assertTrue(value instanceof ReadTransactionSuccess);
         final ReadTransactionSuccess success = (ReadTransactionSuccess) value;
-        Assert.assertTrue(success.getData().isPresent());
-        Assert.assertEquals(DATA_1, success.getData().get());
+        assertEquals(Optional.of(DATA_1), success.getData());
     }
 
     @Test
@@ -99,16 +99,15 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
         verify(callback).accept(captor.capture());
         final Response<?, ?> value = captor.getValue();
-        Assert.assertTrue(value instanceof ExistsTransactionSuccess);
+        assertTrue(value instanceof ExistsTransactionSuccess);
         final ExistsTransactionSuccess success = (ExistsTransactionSuccess) value;
-        Assert.assertTrue(success.getExists());
+        assertTrue(success.getExists());
     }
 
     @Test
     public void testHandleForwardedRemotePurgeRequest() {
         final TestProbe probe = createProbe();
-        final TransactionPurgeRequest request =
-                new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
+        final TransactionPurgeRequest request = new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
         testHandleForwardedRemoteRequest(request);
     }
 
@@ -118,8 +117,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final TestProbe probe = createProbe();
         final AbortLocalTransactionRequest request = new AbortLocalTransactionRequest(TRANSACTION_ID, probe.ref());
         final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
-        Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.ABORT, modifyRequest.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), modifyRequest.getPersistenceProtocol());
     }
 
     @Override
@@ -132,8 +130,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
         final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
         verify(modification).applyToCursor(any());
-        Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.THREE_PHASE, modifyRequest.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.THREE_PHASE), modifyRequest.getPersistenceProtocol());
         checkModifications(modifyRequest);
     }
 
@@ -152,7 +149,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
     }
 
     protected <R extends TransactionRequest<R>> R testForwardToLocal(final TransactionRequest<?> toForward,
-                                                                  final Class<R> expectedMessageClass) {
+                                                                     final Class<R> expectedMessageClass) {
         final Consumer<Response<?, ?>> callback = createCallbackMock();
         final TransactionTester<LocalReadWriteProxyTransaction> transactionTester = createLocalProxy();
         final LocalReadWriteProxyTransaction successor = transactionTester.getTransaction();
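The doAnswer(...) stub earlier in this file now uses the two-argument InvocationOnMock.getArgument(index, type) overload instead of an explicit generic witness. A self-contained Mockito sketch of the same idiom with stand-in types:

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    public final class GetArgumentSketch {
        public interface TaskRunner { void execute(Runnable command); }

        public static void main(final String[] args) {
            final TaskRunner runner = mock(TaskRunner.class);

            // getArgument(0, Runnable.class) fetches the argument and casts it to the requested
            // type in one call, replacing the inv.<Runnable>getArgument(0) witness form.
            doAnswer(inv -> {
                inv.getArgument(0, Runnable.class).run();
                return null;
            }).when(runner).execute(any(Runnable.class));

            runner.execute(() -> System.out.println("ran synchronously"));
        }
    }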
index 7a4fb742038a5ed6d15305b5437f1fe6565d1a20..651c7d2028591693e76dc7a4c508d779d1803068 100644 (file)
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertOperationThrowsException;
 
@@ -14,16 +17,14 @@ import akka.testkit.TestProbe;
 import com.google.common.base.Ticker;
 import com.google.common.base.VerifyException;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class LocalReadOnlyProxyTransactionTest extends LocalProxyTransactionTest<LocalReadOnlyProxyTransaction> {
-
     private DataTreeSnapshot snapshot;
 
     @Override
@@ -39,74 +40,76 @@ public class LocalReadOnlyProxyTransactionTest extends LocalProxyTransactionTest
 
     @Test
     public void testIsSnapshotOnly() {
-        Assert.assertTrue(transaction.isSnapshotOnly());
+        assertTrue(transaction.isSnapshotOnly());
     }
 
     @Test
     public void testReadOnlyView() {
-        Assert.assertEquals(snapshot, transaction.readOnlyView());
+        assertEquals(snapshot, transaction.readOnlyView());
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDirectCommit() {
-        transaction.directCommit();
+        assertThrows(UnsupportedOperationException.class, () -> transaction.directCommit());
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testCanCommit() {
-        transaction.canCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.canCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testPreCommit() {
-        transaction.preCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.preCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDoCommit() {
-        transaction.doCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.doCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDelete() {
-        transaction.delete(PATH_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.delete(PATH_1));
     }
 
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testMerge() {
-        transaction.merge(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.merge(PATH_1, DATA_1));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testWrite() {
-        transaction.write(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.write(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoDelete() {
-        transaction.doDelete(PATH_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doDelete(PATH_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoMerge() {
-        transaction.doMerge(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doMerge(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoWrite() {
-        transaction.doWrite(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doWrite(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testCommitRequest() {
-        transaction.commitRequest(true);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.commitRequest(true));
     }
 
     @Test
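The hunk above converts every @Test(expected = ...) method into a plain @Test with org.junit.Assert.assertThrows, which pins the expected exception to a single statement instead of the whole method body. A minimal JUnit 4 sketch of the same conversion, with illustrative names and a JDK call standing in for the transaction operation:

    import static org.junit.Assert.assertNotNull;
    import static org.junit.Assert.assertThrows;

    import java.util.List;
    import org.junit.Test;

    public class AssertThrowsSketch {
        @Test
        public void readOnlyCollectionRejectsWrites() {
            // Only the lambda is allowed to throw, so setup code failing with the same
            // exception type can no longer produce a false pass.
            final UnsupportedOperationException ex = assertThrows(UnsupportedOperationException.class,
                () -> List.of("a", "b").add("c"));

            // The returned exception is available for further assertions.
            assertNotNull(ex);
        }
    }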
index 2a38f183ef05443d7b7ccaf298e0c9290147f532..ad772d0703e25ceac187fd062362b2c06c6ef9cd 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -20,7 +22,6 @@ import com.google.common.base.Ticker;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
@@ -40,9 +41,9 @@ import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTest<LocalReadWriteProxyTransaction> {
     @Mock
@@ -60,12 +61,12 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
 
     @Test
     public void testIsSnapshotOnly() {
-        Assert.assertFalse(transaction.isSnapshotOnly());
+        assertFalse(transaction.isSnapshotOnly());
     }
 
     @Test
     public void testReadOnlyView() {
-        Assert.assertEquals(modification, transaction.readOnlyView());
+        assertEquals(modification, transaction.readOnlyView());
     }
 
     @Test
@@ -125,8 +126,8 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         transaction.doWrite(PATH_1, DATA_1);
         final boolean coordinated = true;
         final CommitLocalTransactionRequest request = transaction.commitRequest(coordinated);
-        Assert.assertEquals(coordinated, request.isCoordinated());
-        Assert.assertEquals(modification, request.getModification());
+        assertEquals(coordinated, request.isCoordinated());
+        assertEquals(modification, request.getModification());
     }
 
     @Test
@@ -141,7 +142,7 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
     public void testSealOnly() throws Exception {
         assertOperationThrowsException(() -> transaction.getSnapshot(), IllegalStateException.class);
         transaction.sealOnly();
-        Assert.assertEquals(modification, transaction.getSnapshot());
+        assertEquals(modification, transaction.getSnapshot());
     }
 
     @Test
@@ -150,7 +151,7 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         final RemoteProxyTransaction successor = transactionTester.getTransaction();
         doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
         transaction.sealOnly();
-        final TransactionRequest<?> request = transaction.flushState().get();
+        final TransactionRequest<?> request = transaction.flushState().orElseThrow();
         transaction.forwardToSuccessor(successor, request, null);
         verify(modification).applyToCursor(any());
         transactionTester.getTransaction().seal();
@@ -244,8 +245,8 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         verify(modification).delete(PATH_3);
         final CommitLocalTransactionRequest commitRequest =
                 getTester().expectTransactionRequest(CommitLocalTransactionRequest.class);
-        Assert.assertEquals(modification, commitRequest.getModification());
-        Assert.assertEquals(coordinated, commitRequest.isCoordinated());
+        assertEquals(modification, commitRequest.getModification());
+        assertEquals(coordinated, commitRequest.isCoordinated());
     }
 
 }
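flushState().get() above becomes flushState().orElseThrow(), the Optional accessor whose name spells out that absence is an error at this point. A JDK-only illustration:

    import java.util.NoSuchElementException;
    import java.util.Optional;

    public final class OrElseThrowSketch {
        public static void main(final String[] args) {
            // Behaves like get(), but reads as an explicit "must be present" assertion.
            System.out.println(Optional.of("state").orElseThrow());

            try {
                Optional.empty().orElseThrow();
            } catch (NoSuchElementException e) {
                System.out.println("an empty Optional throws, exactly like get()");
            }
        }
    }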
index ebd96c974187682807d122ff5d2c5484c181fbab..6ef398c78bb3a390520d03e930537eecb64868f1 100644 (file)
@@ -50,7 +50,7 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import scala.concurrent.Promise;
 
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
@@ -85,7 +85,7 @@ public class ModuleShardBackendResolverTest {
 
         moduleShardBackendResolver = new ModuleShardBackendResolver(CLIENT_ID, actorUtils);
         doReturn(shardStrategyFactory).when(actorUtils).getShardStrategyFactory();
-        doReturn(shardStrategy).when(shardStrategyFactory).getStrategy(YangInstanceIdentifier.empty());
+        doReturn(shardStrategy).when(shardStrategyFactory).getStrategy(YangInstanceIdentifier.of());
         final PrimaryShardInfoFutureCache cache = new PrimaryShardInfoFutureCache();
         doReturn(cache).when(actorUtils).getPrimaryShardInfoCache();
     }
@@ -97,15 +97,15 @@ public class ModuleShardBackendResolverTest {
 
     @Test
     public void testResolveShardForPathNonNullCookie() {
-        doReturn(DefaultShardStrategy.DEFAULT_SHARD).when(shardStrategy).findShard(YangInstanceIdentifier.empty());
-        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.empty());
+        doReturn(DefaultShardStrategy.DEFAULT_SHARD).when(shardStrategy).findShard(YangInstanceIdentifier.of());
+        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
         assertEquals(0L, (long) cookie);
     }
 
     @Test
     public void testResolveShardForPathNullCookie() {
-        doReturn("foo").when(shardStrategy).findShard(YangInstanceIdentifier.empty());
-        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.empty());
+        doReturn("foo").when(shardStrategy).findShard(YangInstanceIdentifier.of());
+        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
         assertEquals(1L, (long) cookie);
     }
 
@@ -120,7 +120,7 @@ public class ModuleShardBackendResolverTest {
         final CompletionStage<ShardBackendInfo> stage = moduleShardBackendResolver.getBackendInfo(0L);
         final ShardBackendInfo shardBackendInfo = TestUtils.getWithTimeout(stage.toCompletableFuture());
         assertEquals(0L, shardBackendInfo.getCookie().longValue());
-        assertEquals(dataTree, shardBackendInfo.getDataTree().get());
+        assertEquals(dataTree, shardBackendInfo.getDataTree().orElseThrow());
         assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shardBackendInfo.getName());
     }
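YangInstanceIdentifier.empty() is replaced by the zero-argument YangInstanceIdentifier.of() throughout this test; both denote the root identifier with no path arguments. A short sketch, assuming only the accessor already used in the hunks above (the class name is illustrative):

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    public final class RootIdentifierSketch {
        public static void main(final String[] args) {
            // The zero-argument of() yields the root identifier.
            final YangInstanceIdentifier root = YangInstanceIdentifier.of();
            System.out.println(root.getPathArguments().isEmpty()); // prints true
        }
    }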
 
index 6afecf36b8e300da316b871b750249302e0e5b8f..41847973fb04f6532b6b9d25dbbc3cf38eee86e9 100644 (file)
@@ -12,7 +12,6 @@ import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.CoreMatchers.isA;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertFutureEquals;
 
 import akka.testkit.TestProbe;
@@ -41,13 +40,13 @@ import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<RemoteProxyTransaction> {
-
     @Override
     protected RemoteProxyTransaction createTransaction(final ProxyHistory parent, final TransactionIdentifier id,
                                                        final DataTreeSnapshot snapshot) {
+        mockForRemote();
         return new RemoteProxyTransaction(parent, TRANSACTION_ID, false, false, false);
     }
 
@@ -101,8 +100,7 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final ListenableFuture<Boolean> result = transaction.directCommit();
         final TransactionTester<RemoteProxyTransaction> tester = getTester();
         final ModifyTransactionRequest req = tester.expectTransactionRequest(ModifyTransactionRequest.class);
-        assertTrue(req.getPersistenceProtocol().isPresent());
-        assertEquals(PersistenceProtocol.SIMPLE, req.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.SIMPLE), req.getPersistenceProtocol());
         tester.replySuccess(new TransactionCommitSuccess(TRANSACTION_ID, req.getSequence()));
         assertFutureEquals(true, result);
     }
@@ -181,8 +179,7 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final ModifyTransactionRequest request = builder.build();
         final ModifyTransactionRequest received = testForwardToRemote(request, ModifyTransactionRequest.class);
         assertEquals(request.getTarget(), received.getTarget());
-        assertTrue(received.getPersistenceProtocol().isPresent());
-        assertEquals(PersistenceProtocol.ABORT, received.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), received.getPersistenceProtocol());
     }
 
     @Test
index 092d262ae8fe034ccc83d073a4b585a961a3713f..f4e0be9c3d9f64b5f8607732da2b6d4ef93bbad0 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -45,8 +46,8 @@ final class TestUtils {
      * @param <T>      type
      * @throws Exception exception
      */
-    static <T> void assertFutureEquals(final T expected, final Future<T> actual) throws Exception {
-        Assert.assertEquals(expected, getWithTimeout(actual));
+    static <T> void assertFutureEquals(final T expected, final Future<? extends T> actual) throws Exception {
+        assertEquals(expected, getWithTimeout(actual));
     }
 
     /**
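Widening the helper parameter from Future<T> to Future<? extends T> lets callers pass a future of a subtype of the expected value's type, which the Empty and CommitInfo assertions earlier in this change rely on. A JDK-only sketch of the variance point, with illustrative names:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Future;

    public final class FutureVarianceSketch {
        // Accepts Future<Integer>, Future<Long>, ... whenever the expected value is a Number.
        static <T> void assertFutureEquals(final T expected, final Future<? extends T> actual) throws Exception {
            if (!expected.equals(actual.get())) {
                throw new AssertionError("expected " + expected + " but was " + actual.get());
            }
        }

        public static void main(final String[] args) throws Exception {
            final Future<Integer> future = CompletableFuture.completedFuture(42);
            // With a plain Future<T> parameter this call would not compile once T is Number.
            assertFutureEquals((Number) 42, future);
        }
    }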
index 065a0e97fb6acb0e8af346ad821244e88320192e..f9fb3b830d1b5eb53401b66c6d99efad6e3d53c0 100644 (file)
@@ -16,7 +16,6 @@ import org.opendaylight.controller.cluster.access.client.AbstractClientConnectio
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -95,7 +94,7 @@ class TransactionTester<T extends AbstractProxyTransaction> {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> externalizableProxy(
+        protected RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> externalizableProxy(
                 final ABIVersion version) {
             throw new UnsupportedOperationException("Not implemented");
         }
index 6563ff6eda2be505bdccb37edf525bfae3211936..9f19ca045d6144639ecdcfb19da7301013036775 100644 (file)
@@ -13,10 +13,10 @@ import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
 
@@ -24,6 +24,7 @@ import akka.actor.ActorSystem;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
@@ -37,17 +38,16 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runners.Parameterized.Parameter;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
 import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
@@ -63,31 +63,29 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 public abstract class AbstractDistributedDataStoreIntegrationTest {
-
     @Parameter
-    public Class<? extends AbstractDataStore> testParameter;
+    public Class<? extends ClientBackedDataStore> testParameter;
 
     protected ActorSystem system;
 
@@ -101,8 +99,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testWriteTransactionWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "transactionIntegrationTest", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "transactionIntegrationTest", "test-1")) {
 
             testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
@@ -117,8 +114,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testWriteTransactionWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testWriteTransactionWithMultipleShards",
+            "cars-1", "people-1")) {
 
             DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
@@ -150,21 +147,16 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // Verify the data in the store
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            Optional<NormalizedNode> optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testReadWriteTransactionWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testReadWriteTransactionWithSingleShard", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithSingleShard",
+            "test-1")) {
 
             // 1. Create a read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
@@ -179,9 +171,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final Boolean exists = readWriteTx.exists(nodePath).get(5, TimeUnit.SECONDS);
             assertEquals("exists", Boolean.TRUE, exists);
 
-            Optional<NormalizedNode> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", nodeToWrite, optional.get());
+            assertEquals(Optional.of(nodeToWrite), readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS));
 
             // 4. Ready the Tx for commit
             final DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
@@ -192,17 +182,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // 6. Verify the data in the store
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", nodeToWrite, optional.get());
+            assertEquals(Optional.of(nodeToWrite), readTx.read(nodePath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testReadWriteTransactionWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testReadWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithMultipleShards",
+            "cars-1", "people-1")) {
 
             DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
@@ -232,34 +220,27 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final Boolean exists = readWriteTx.exists(carPath).get(5, TimeUnit.SECONDS);
             assertEquals("exists", Boolean.TRUE, exists);
 
-            Optional<NormalizedNode> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
+            assertEquals("Data node", Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
 
             testKit.doCommit(readWriteTx.ready());
 
             // Verify the data in the store
             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
-        final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testSingleTransactionsWritesInQuickSuccession", "cars-1")) {
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = testKit.setupDataStore(testParameter, "testSingleTransactionsWritesInQuickSuccession",
+            "cars-1")) {
 
-            final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+            final var txChain = dataStore.createTransactionChain();
 
-            DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+            var writeTx = txChain.newWriteOnlyTransaction();
             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
             writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
             testKit.doCommit(writeTx.ready());
@@ -282,16 +263,16 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
                 .untilAsserted(() -> {
                     // verify frontend metadata has no holes in purged transactions causing overtime memory leak
                     final var localShard = dataStore.getActorUtils().findLocalShard("cars-1") .orElseThrow();
-                    FrontendShardDataTreeSnapshotMetadata frontendMetadata =
-                        (FrontendShardDataTreeSnapshotMetadata) dataStore.getActorUtils()
+                    final var frontendMetadata = (FrontendShardDataTreeSnapshotMetadata) dataStore.getActorUtils()
                             .executeOperation(localShard, new RequestFrontendMetadata());
 
                     final var clientMeta = frontendMetadata.getClients().get(0);
-                    if (dataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
-                        assertTellMetadata(clientMeta);
-                    } else {
-                        assertAskMetadata(clientMeta);
+                    final var iterator = clientMeta.getCurrentHistories().iterator();
+                    var metadata = iterator.next();
+                    while (iterator.hasNext() && metadata.getHistoryId() != 1) {
+                        metadata = iterator.next();
                     }
+                    assertEquals("[[0..10]]", metadata.getPurgedTransactions().ranges().toString());
                 });
 
             final var body = txChain.newReadOnlyTransaction().read(CarsModel.CAR_LIST_PATH)
@@ -303,20 +284,6 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         }
     }
 
-    private static void assertAskMetadata(final FrontendClientMetadata clientMeta) {
-        // ask based should track no metadata
-        assertEquals(List.of(), clientMeta.getCurrentHistories());
-    }
-
-    private static void assertTellMetadata(final FrontendClientMetadata clientMeta) {
-        final var iterator = clientMeta.getCurrentHistories().iterator();
-        var metadata = iterator.next();
-        while (iterator.hasNext() && metadata.getHistoryId() != 1) {
-            metadata = iterator.next();
-        }
-        assertEquals("[[0..10]]", metadata.getPurgedTransactions().ranges().toString());
-    }
-
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void testTransactionCommitFailureWithNoShardLeader(final boolean writeOnly, final String testName)
             throws Exception {
@@ -333,8 +300,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         datastoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(1)
         .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).frontendRequestTimeoutInSeconds(2);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             final Object result = dataStore.getActorUtils().executeOperation(
                 dataStore.getActorUtils().getShardManager(), new FindLocalShard(shardName, true));
             assertTrue("Expected LocalShardFound. Actual: " + result, result instanceof LocalShardFound);
@@ -379,18 +345,10 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
                 // leader was elected in time, the Tx
                 // should have timed out and throw an appropriate
                 // exception cause.
-                try {
-                    txCohort.get().canCommit().get(10, TimeUnit.SECONDS);
-                    fail("Expected NoShardLeaderException");
-                } catch (final ExecutionException e) {
-                    final String msg = "Unexpected exception: "
-                            + Throwables.getStackTraceAsString(e.getCause());
-                    if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
-                        assertTrue(Throwables.getRootCause(e) instanceof NoShardLeaderException);
-                    } else {
-                        assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
-                    }
-                }
+                final var ex = assertThrows(ExecutionException.class,
+                    () -> txCohort.get().canCommit().get(10, TimeUnit.SECONDS));
+                assertTrue("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+                    Throwables.getRootCause(ex) instanceof RequestTimeoutException);
             } finally {
                 try {
                     if (writeTxToClose != null) {
@@ -418,8 +376,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testTransactionAbort() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "transactionAbortIntegrationTest", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "transactionAbortIntegrationTest", "test-1")) {
 
             final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
@@ -441,8 +398,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void testTransactionChainWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testTransactionChainWithSingleShard", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithSingleShard", "test-1")) {
 
             // 1. Create a Tx chain and write-only Tx
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
@@ -477,9 +433,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // the data from the first
             // Tx is visible after being readied.
             DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
-            Optional<NormalizedNode> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", testNode, optional.get());
+            assertEquals(Optional.of(testNode), readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS));
 
             // 6. Create a new RW Tx from the chain, write more data,
             // and ready it
@@ -495,9 +449,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // from the last RW Tx to
             // verify it is visible.
             readTx = txChain.newReadWriteTransaction();
-            optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", outerNode, optional.get());
+            assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
 
             // 8. Wait for the 2 commits to complete and close the
             // chain.
@@ -515,17 +467,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // 9. Create a new read Tx from the data store and verify
             // committed data.
             readTx = dataStore.newReadOnlyTransaction();
-            optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", outerNode, optional.get());
+            assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testTransactionChainWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testTransactionChainWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithMultipleShards",
+            "cars-1", "people-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -550,13 +500,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
             readWriteTx.merge(personPath, person);
 
-            Optional<NormalizedNode> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readWriteTx.read(personPath).get(5, TimeUnit.SECONDS));
 
             final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
 
@@ -577,28 +522,23 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertFalse("isPresent", optional.isPresent());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.empty(), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testCreateChainedTransactionsInQuickSuccession() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionsInQuickSuccession", "cars-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionsInQuickSuccession",
+            "cars-1")) {
 
             final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            DOMTransactionChain txChain = broker.createTransactionChain();
 
             final List<ListenableFuture<?>> futures = new ArrayList<>();
 
@@ -624,7 +564,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
                     .read(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
             assertTrue("isPresent", optional.isPresent());
-            assertEquals("# cars", numCars, ((Collection<?>) optional.get().body()).size());
+            assertEquals("# cars", numCars, ((Collection<?>) optional.orElseThrow().body()).size());
 
             txChain.close();
 
@@ -635,8 +575,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionAfterEmptyTxReadied() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionAfterEmptyTxReadied", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterEmptyTxReadied",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -656,8 +596,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionWhenPreviousNotReady() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionWhenPreviousNotReady", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionWhenPreviousNotReady",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -676,8 +616,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionAfterClose() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionAfterClose", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterClose",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
             txChain.close();
@@ -691,8 +631,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testChainWithReadOnlyTxAfterPreviousReady() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainWithReadOnlyTxAfterPreviousReady", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainWithReadOnlyTxAfterPreviousReady",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -733,38 +673,34 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
     @Test
     public void testChainedTransactionFailureWithSingleShard() throws Exception {
-        final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainedTransactionFailureWithSingleShard", "cars-1")) {
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithSingleShard",
+            "cars-1")) {
 
-            final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
+            final var broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            final var listener = mock(FutureCallback.class);
+            final var txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
-            final DOMDataTreeReadWriteTransaction writeTx = txChain.newReadWriteTransaction();
+            final var writeTx = txChain.newReadWriteTransaction();
 
             writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
                 PeopleModel.emptyContainer());
 
-            final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            final var invalidData = Builders.containerBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                    .build();
 
             writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
 
-            try {
-                writeTx.commit().get(5, TimeUnit.SECONDS);
-                fail("Expected TransactionCommitFailedException");
-            } catch (final ExecutionException e) {
-                // Expected
-            }
+            assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
 
-            verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
-                any(Throwable.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
             txChain.close();
             broker.close();
@@ -774,40 +710,36 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainedTransactionFailureWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithMultipleShards",
+            "cars-1", "people-1")) {
 
             final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            final var listener = mock(FutureCallback.class);
+            final DOMTransactionChain txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
             final DOMDataTreeWriteTransaction writeTx = txChain.newReadWriteTransaction();
 
             writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
                 PeopleModel.emptyContainer());
 
-            final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            final ContainerNode invalidData = Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                .build();
 
             writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
 
             // Note that merge will validate the data and fail but put
             // succeeds b/c deep validation is not
             // done for put for performance reasons.
-            try {
-                writeTx.commit().get(5, TimeUnit.SECONDS);
-                fail("Expected TransactionCommitFailedException");
-            } catch (final ExecutionException e) {
-                // Expected
-            }
+            assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
 
-            verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
-                any(Throwable.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
             txChain.close();
             broker.close();
@@ -817,16 +749,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testDataTreeChangeListenerRegistration() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testDataTreeChangeListenerRegistration", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testDataTreeChangeListenerRegistration",
+            "test-1")) {
 
             testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
             final MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
 
-            ListenerRegistration<MockDataTreeChangeListener> listenerReg = dataStore
-                    .registerTreeChangeListener(TestModel.TEST_PATH, listener);
+            final var listenerReg = dataStore.registerTreeChangeListener(TestModel.TEST_PATH, listener);
 
             assertNotNull("registerTreeChangeListener returned null", listenerReg);
 
@@ -878,7 +809,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         DataTree dataTree = new InMemoryDataTreeFactory().create(
             DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
         AbstractShardTest.writeToStore(dataTree, CarsModel.BASE_PATH, carsNode);
-        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         final Snapshot carsSnapshot = Snapshot.create(
             new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -890,7 +821,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         final NormalizedNode peopleNode = PeopleModel.create();
         AbstractShardTest.writeToStore(dataTree, PeopleModel.BASE_PATH, peopleNode);
 
-        root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+        root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         final Snapshot peopleSnapshot = Snapshot.create(
             new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -900,41 +831,31 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             new DatastoreSnapshot.ShardSnapshot("cars", carsSnapshot),
             new DatastoreSnapshot.ShardSnapshot("people", peopleSnapshot)));
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, name, "module-shards-member1.conf", true, "cars", "people")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, name, "module-shards-member1.conf", true,
+            "cars", "people")) {
 
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
             // two reads
-            Optional<NormalizedNode> optional = readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", carsNode, optional.get());
-
-            optional = readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", peopleNode, optional.get());
+            assertEquals(Optional.of(carsNode), readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(peopleNode), readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
+    @Ignore("ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate")
+    // FIXME: re-enable this test
     public void testSnapshotOnRootOverwrite() throws Exception {
-        if (!DistributedDataStore.class.isAssignableFrom(testParameter)) {
-            // FIXME: ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate
-            return;
-        }
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder.snapshotOnRootOverwrite(true));
+        try (var dataStore = testKit.setupDataStore(testParameter, "testRootOverwrite",
+            "module-shards-default-cars-member1.conf", true, "cars", "default")) {
 
-        final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(),
-                datastoreContextBuilder.snapshotOnRootOverwrite(true));
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-                testParameter, "testRootOverwrite", "module-shards-default-cars-member1.conf",
-                true, "cars", "default")) {
-
-            ContainerNode rootNode = ImmutableContainerNodeBuilder.create()
-                    .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(SchemaContext.NAME))
-                    .withChild(CarsModel.create())
-                    .build();
+            final var rootNode = Builders.containerBuilder()
+                .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
+                .withChild(CarsModel.create())
+                .build();
 
-            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.empty(), rootNode);
+            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
             IntegrationTestKit.verifyShardState(dataStore, "cars",
                 state -> assertEquals(1, state.getSnapshotIndex()));
 
@@ -954,7 +875,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             verifySnapshot("member-1-shard-cars-testRootOverwrite", 1, 1);
 
             // root overwrite so expect a snapshot
-            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.empty(), rootNode);
+            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
 
             // this was a real snapshot so everything should be in it(1 + 10 + 1)
             IntegrationTestKit.verifyShardState(dataStore, "cars",
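
The hunks above trade the mocked DOMTransactionChainListener and the try/fail/catch block for a Guava FutureCallback registered via addCallback() plus assertThrows(). A minimal, self-contained sketch of that idiom follows; it uses a plain failed ListenableFuture as a stand-in for writeTx.commit(), so the types and values are illustrative only, not the datastore's actual commit path.

// Sketch only: a failed future stands in for a commit that fails validation.
import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

final class CommitFailureSketch {
    @SuppressWarnings("unchecked")
    static void verifyCommitFailure() {
        final FutureCallback<Object> callback = mock(FutureCallback.class);
        final ListenableFuture<Object> commit =
            Futures.immediateFailedFuture(new IllegalStateException("validation failed"));
        Futures.addCallback(commit, callback, MoreExecutors.directExecutor());

        // The commit future completes exceptionally ...
        assertThrows(ExecutionException.class, () -> commit.get(5, TimeUnit.SECONDS));
        // ... and the registered callback observes the failure.
        verify(callback, timeout(5000)).onFailure(any());
    }
}
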
index 31f2b6b0674a6be403468f364487e861f111c00b..ab95f7028a998598cfce153dba8031e66a4cc0d5 100644
@@ -66,20 +66,21 @@ import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
@@ -167,7 +168,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
             final Optional<DataContainerChild> idLeaf =
                     mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
             assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
-            final Object value = idLeaf.get().body();
+            final Object value = idLeaf.orElseThrow().body();
             assertTrue("Unexpected value for leaf " + TestModel.ID_QNAME.getLocalName() + ": " + value,
                     listEntryKeys.remove(value));
         }
@@ -346,7 +347,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
         writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
-        final NormalizedNode root = readStore(testStore, YangInstanceIdentifier.empty());
+        final NormalizedNode root = readStore(testStore, YangInstanceIdentifier.of());
 
         InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -398,15 +399,14 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         final Optional<DataContainerChild> idLeaf =
                 mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
         assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
-        assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.get().body());
+        assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.orElseThrow().body());
     }
 
     public static DataTreeCandidateTip mockCandidate(final String name) {
         final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
         final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
-        doReturn(ModificationType.WRITE).when(mockCandidateNode).getModificationType();
-        doReturn(Optional.of(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)))
-                .when(mockCandidateNode).getDataAfter();
+        doReturn(ModificationType.WRITE).when(mockCandidateNode).modificationType();
+        doReturn(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)).when(mockCandidateNode).dataAfter();
         doReturn(CarsModel.BASE_PATH).when(mockCandidate).getRootPath();
         doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
         return mockCandidate;
@@ -415,8 +415,8 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     static DataTreeCandidateTip mockUnmodifiedCandidate(final String name) {
         final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
         final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
-        doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).getModificationType();
-        doReturn(YangInstanceIdentifier.empty()).when(mockCandidate).getRootPath();
+        doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).modificationType();
+        doReturn(YangInstanceIdentifier.of()).when(mockCandidate).getRootPath();
         doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
         return mockCandidate;
     }
@@ -446,7 +446,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
     public static class CapturingShardDataTreeCohort extends ShardDataTreeCohort {
         private volatile ShardDataTreeCohort delegate;
-        private FutureCallback<Void> canCommit;
+        private FutureCallback<Empty> canCommit;
         private FutureCallback<DataTreeCandidate> preCommit;
         private FutureCallback<UnsignedLong> commit;
 
@@ -454,7 +454,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
             this.delegate = delegate;
         }
 
-        public FutureCallback<Void> getCanCommit() {
+        public FutureCallback<Empty> getCanCommit() {
             assertNotNull("canCommit was not invoked", canCommit);
             return canCommit;
         }
@@ -470,8 +470,8 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public TransactionIdentifier getIdentifier() {
-            return delegate.getIdentifier();
+        TransactionIdentifier transactionId() {
+            return delegate.transactionId();
         }
 
         @Override
@@ -485,7 +485,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public void canCommit(final FutureCallback<Void> callback) {
+        public void canCommit(final FutureCallback<Empty> callback) {
             canCommit = mockFutureCallback(callback);
             delegate.canCommit(canCommit);
         }
@@ -519,7 +519,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public void abort(final FutureCallback<Void> callback) {
+        public void abort(final FutureCallback<Empty> callback) {
             delegate.abort(callback);
         }
 
index a3726e270bea47a1ad4b840e20ab14b6b5753709..67987c3e37bb147b6e56d04165035f06d93923f8 100644
@@ -45,12 +45,20 @@ public abstract class AbstractTest {
         TX_COUNTER.set(1L);
     }
 
+    protected static TransactionIdentifier newTransactionId(final long txId) {
+        return new TransactionIdentifier(HISTORY_ID, txId);
+    }
+
     protected static TransactionIdentifier nextTransactionId() {
-        return new TransactionIdentifier(HISTORY_ID, TX_COUNTER.getAndIncrement());
+        return newTransactionId(TX_COUNTER.getAndIncrement());
+    }
+
+    protected static LocalHistoryIdentifier newHistoryId(final long historyId) {
+        return new LocalHistoryIdentifier(CLIENT_ID, historyId);
     }
 
     protected static LocalHistoryIdentifier nextHistoryId() {
-        return new LocalHistoryIdentifier(CLIENT_ID, HISTORY_COUNTER.incrementAndGet());
+        return newHistoryId(HISTORY_COUNTER.incrementAndGet());
     }
 
     protected static <T> T waitOnAsyncTask(final CompletionStage<T> completionStage, final FiniteDuration timeout)
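
The AbstractTest change above splits the counter-driven next*() helpers from deterministic new*(id) factories, so tests can construct identifiers with known values. A tiny sketch of the same split, using a placeholder TxId record rather than the real TransactionIdentifier:

// Sketch only: TxId is a placeholder type for illustration.
import java.util.concurrent.atomic.AtomicLong;

final class IdFactorySketch {
    private static final AtomicLong TX_COUNTER = new AtomicLong(1L);

    record TxId(long value) {
    }

    static TxId newTransactionId(final long txId) {
        return new TxId(txId);
    }

    static TxId nextTransactionId() {
        // Delegate to the deterministic factory so both paths share construction logic.
        return newTransactionId(TX_COUNTER.getAndIncrement());
    }
}
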
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java
deleted file mode 100644
index d3ae761..0000000
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.testkit.javadsl.TestKit;
-import akka.util.Timeout;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.FluentFuture;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.mockito.ArgumentCaptor;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.TransactionProxyTest.TestException;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Abstract base class for TransactionProxy unit tests.
- *
- * @author Thomas Pantelis
- */
-public abstract class AbstractTransactionProxyTest extends AbstractTest {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
-
-    private static ActorSystem system;
-    private static SchemaContext SCHEMA_CONTEXT;
-
-    private final Configuration configuration = new MockConfiguration() {
-        Map<String, ShardStrategy> strategyMap = ImmutableMap.<String, ShardStrategy>builder().put(
-                TestModel.JUNK_QNAME.getLocalName(), new ShardStrategy() {
-                    @Override
-                    public String findShard(final YangInstanceIdentifier path) {
-                        return TestModel.JUNK_QNAME.getLocalName();
-                    }
-                }).put(
-                CarsModel.BASE_QNAME.getLocalName(), new ShardStrategy() {
-                    @Override
-                    public String findShard(final YangInstanceIdentifier path) {
-                        return CarsModel.BASE_QNAME.getLocalName();
-                    }
-                }).build();
-
-        @Override
-        public ShardStrategy getStrategyForModule(final String moduleName) {
-            return strategyMap.get(moduleName);
-        }
-
-        @Override
-        public String getModuleNameFromNameSpace(final String nameSpace) {
-            if (TestModel.JUNK_QNAME.getNamespace().toString().equals(nameSpace)) {
-                return TestModel.JUNK_QNAME.getLocalName();
-            } else if (CarsModel.BASE_QNAME.getNamespace().toString().equals(nameSpace)) {
-                return CarsModel.BASE_QNAME.getLocalName();
-            }
-            return null;
-        }
-    };
-
-    @Mock
-    protected ActorUtils mockActorContext;
-
-    protected TransactionContextFactory mockComponentFactory;
-
-    @Mock
-    private ClusterWrapper mockClusterWrapper;
-
-    protected final String memberName = "mock-member";
-
-    private final int operationTimeoutInSeconds = 2;
-    protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder()
-            .operationTimeoutInSeconds(operationTimeoutInSeconds);
-
-    @BeforeClass
-    public static void setUpClass() {
-
-        Config config = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
-                .put("akka.actor.default-dispatcher.type",
-                        "akka.testkit.CallingThreadDispatcherConfigurator").build())
-                .withFallback(ConfigFactory.load());
-        system = ActorSystem.create("test", config);
-        SCHEMA_CONTEXT = TestModel.createTestContext();
-    }
-
-    @AfterClass
-    public static void tearDownClass() {
-        TestKit.shutdownActorSystem(system);
-        system = null;
-        SCHEMA_CONTEXT = null;
-    }
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(getSystem()).when(mockActorContext).getActorSystem();
-        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
-        doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
-        doReturn(new ShardStrategyFactory(configuration)).when(mockActorContext).getShardStrategyFactory();
-        doReturn(SCHEMA_CONTEXT).when(mockActorContext).getSchemaContext();
-        doReturn(new Timeout(operationTimeoutInSeconds, TimeUnit.SECONDS)).when(mockActorContext).getOperationTimeout();
-        doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
-        doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
-        doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
-        doReturn(new Timeout(5, TimeUnit.SECONDS)).when(mockActorContext).getTransactionCommitOperationTimeout();
-
-        final ClientIdentifier mockClientId = MockIdentifiers.clientIdentifier(getClass(), memberName);
-        mockComponentFactory = new TransactionContextFactory(mockActorContext, mockClientId);
-
-        Timer timer = new MetricRegistry().timer("test");
-        doReturn(timer).when(mockActorContext).getOperationTimer(any(String.class));
-    }
-
-    protected ActorSystem getSystem() {
-        return system;
-    }
-
-    protected CreateTransaction eqCreateTransaction(final String expMemberName,
-            final TransactionType type) {
-        class CreateTransactionArgumentMatcher implements ArgumentMatcher<CreateTransaction> {
-            @Override
-            public boolean matches(final CreateTransaction argument) {
-                return argument.getTransactionId().getHistoryId().getClientId().getFrontendId().getMemberName()
-                        .getName().equals(expMemberName) && argument.getTransactionType() == type.ordinal();
-            }
-        }
-
-        return argThat(new CreateTransactionArgumentMatcher());
-    }
-
-    protected DataExists eqDataExists() {
-        class DataExistsArgumentMatcher implements ArgumentMatcher<DataExists> {
-            @Override
-            public boolean matches(final DataExists argument) {
-                return argument.getPath().equals(TestModel.TEST_PATH);
-            }
-        }
-
-        return argThat(new DataExistsArgumentMatcher());
-    }
-
-    protected ReadData eqReadData() {
-        return eqReadData(TestModel.TEST_PATH);
-    }
-
-    protected ReadData eqReadData(final YangInstanceIdentifier path) {
-        class ReadDataArgumentMatcher implements ArgumentMatcher<ReadData> {
-            @Override
-            public boolean matches(final ReadData argument) {
-                return argument.getPath().equals(path);
-            }
-        }
-
-        return argThat(new ReadDataArgumentMatcher());
-    }
-
-    protected Future<Object> readyTxReply(final String path) {
-        return Futures.successful((Object)new ReadyTransactionReply(path));
-    }
-
-
-    protected Future<ReadDataReply> readDataReply(final NormalizedNode data) {
-        return Futures.successful(new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    protected Future<DataExistsReply> dataExistsReply(final boolean exists) {
-        return Futures.successful(new DataExistsReply(exists, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    protected Future<BatchedModificationsReply> batchedModificationsReply(final int count) {
-        return Futures.successful(new BatchedModificationsReply(count));
-    }
-
-    @SuppressWarnings("unchecked")
-    protected Future<Object> incompleteFuture() {
-        return mock(Future.class);
-    }
-
-    protected ActorSelection actorSelection(final ActorRef actorRef) {
-        return getSystem().actorSelection(actorRef.path());
-    }
-
-    protected void expectBatchedModifications(final ActorRef actorRef, final int count) {
-        doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectBatchedModifications(final int count) {
-        doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectBatchedModificationsReady(final ActorRef actorRef) {
-        expectBatchedModificationsReady(actorRef, false);
-    }
-
-    protected void expectBatchedModificationsReady(final ActorRef actorRef, final boolean doCommitOnReady) {
-        doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
-            readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectIncompleteBatchedModifications() {
-        doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectFailedBatchedModifications(final ActorRef actorRef) {
-        doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectReadyLocalTransaction(final ActorRef actorRef, final boolean doCommitOnReady) {
-        doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
-            readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(actorRef)), isA(ReadyLocalTransaction.class), any(Timeout.class));
-    }
-
-    protected CreateTransactionReply createTransactionReply(final ActorRef actorRef, final short transactionVersion) {
-        return new CreateTransactionReply(actorRef.path().toString(), nextTransactionId(), transactionVersion);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem) {
-        return setupActorContextWithoutInitialCreateTransaction(actorSystem, DefaultShardStrategy.DEFAULT_SHARD);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
-            final String shardName) {
-        return setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
-                DataStoreVersions.CURRENT_VERSION);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
-            final String shardName, final short transactionVersion) {
-        ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-        log.info("Created mock shard actor {}", actorRef);
-
-        doReturn(actorSystem.actorSelection(actorRef.path()))
-                .when(mockActorContext).actorSelection(actorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(actorSystem, actorRef, transactionVersion))
-                .when(mockActorContext).findPrimaryShardAsync(eq(shardName));
-
-        return actorRef;
-    }
-
-    protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef) {
-        return primaryShardInfoReply(actorSystem, actorRef, DataStoreVersions.CURRENT_VERSION);
-    }
-
-    protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef,
-            final short transactionVersion) {
-        return Futures.successful(new PrimaryShardInfo(actorSystem.actorSelection(actorRef.path()),
-                transactionVersion));
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type, final short transactionVersion, final String shardName) {
-        ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
-                transactionVersion);
-
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, transactionVersion,
-                memberName, shardActorRef);
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type, final short transactionVersion, final String prefix,
-            final ActorRef shardActorRef) {
-
-        ActorRef txActorRef;
-        if (type == TransactionType.WRITE_ONLY
-                && dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled()) {
-            txActorRef = shardActorRef;
-        } else {
-            txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-            log.info("Created mock shard Tx actor {}", txActorRef);
-
-            doReturn(actorSystem.actorSelection(txActorRef.path()))
-                .when(mockActorContext).actorSelection(txActorRef.path().toString());
-
-            doReturn(Futures.successful(createTransactionReply(txActorRef, transactionVersion))).when(mockActorContext)
-                .executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
-                        eqCreateTransaction(prefix, type), any(Timeout.class));
-        }
-
-        return txActorRef;
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type) {
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
-                DefaultShardStrategy.DEFAULT_SHARD);
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type,
-            final String shardName) {
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
-                shardName);
-    }
-
-    @SuppressWarnings({"checkstyle:avoidHidingCauseException", "checkstyle:IllegalThrows"})
-    protected void propagateReadFailedExceptionCause(final FluentFuture<?> future) throws Throwable {
-        try {
-            future.get(5, TimeUnit.SECONDS);
-            fail("Expected ReadFailedException");
-        } catch (ExecutionException e) {
-            final Throwable cause = e.getCause();
-            assertTrue("Unexpected cause: " + cause.getClass(), cause instanceof ReadFailedException);
-            throw Throwables.getRootCause(cause);
-        }
-    }
-
-    protected List<BatchedModifications> captureBatchedModifications(final ActorRef actorRef) {
-        ArgumentCaptor<BatchedModifications> batchedModificationsCaptor =
-                ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext, Mockito.atLeastOnce()).executeOperationAsync(
-                eq(actorSelection(actorRef)), batchedModificationsCaptor.capture(), any(Timeout.class));
-
-        List<BatchedModifications> batchedModifications = filterCaptured(
-                batchedModificationsCaptor, BatchedModifications.class);
-        return batchedModifications;
-    }
-
-    protected <T> List<T> filterCaptured(final ArgumentCaptor<T> captor, final Class<T> type) {
-        List<T> captured = new ArrayList<>();
-        for (T c: captor.getAllValues()) {
-            if (type.isInstance(c)) {
-                captured.add(c);
-            }
-        }
-
-        return captured;
-    }
-
-    protected void verifyOneBatchedModification(final ActorRef actorRef, final Modification expected,
-            final boolean expIsReady) {
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), expIsReady, expIsReady, expected);
-    }
-
-    protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
-            final Modification... expected) {
-        verifyBatchedModifications(message, expIsReady, false, expected);
-    }
-
-    protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
-            final boolean expIsDoCommitOnReady, final Modification... expected) {
-        assertEquals("Message type", BatchedModifications.class, message.getClass());
-        BatchedModifications batchedModifications = (BatchedModifications)message;
-        assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
-        assertEquals("isReady", expIsReady, batchedModifications.isReady());
-        assertEquals("isDoCommitOnReady", expIsDoCommitOnReady, batchedModifications.isDoCommitOnReady());
-        for (int i = 0; i < batchedModifications.getModifications().size(); i++) {
-            Modification actual = batchedModifications.getModifications().get(i);
-            assertEquals("Modification type", expected[i].getClass(), actual.getClass());
-            assertEquals("getPath", ((AbstractModification)expected[i]).getPath(),
-                    ((AbstractModification)actual).getPath());
-            if (actual instanceof WriteModification) {
-                assertEquals("getData", ((WriteModification)expected[i]).getData(),
-                        ((WriteModification)actual).getData());
-            }
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void verifyCohortFutures(final AbstractThreePhaseCommitCohort<?> proxy,
-            final Object... expReplies) {
-        assertEquals("getReadyOperationFutures size", expReplies.length,
-                proxy.getCohortFutures().size());
-
-        List<Object> futureResults = new ArrayList<>();
-        for (Future<?> future : proxy.getCohortFutures()) {
-            assertNotNull("Ready operation Future is null", future);
-            try {
-                futureResults.add(Await.result(future, FiniteDuration.create(5, TimeUnit.SECONDS)));
-            } catch (Exception e) {
-                futureResults.add(e);
-            }
-        }
-
-        for (Object expReply : expReplies) {
-            boolean found = false;
-            Iterator<?> iter = futureResults.iterator();
-            while (iter.hasNext()) {
-                Object actual = iter.next();
-                if (CommitTransactionReply.isSerializedType(expReply)
-                        && CommitTransactionReply.isSerializedType(actual)
-                        || expReply instanceof ActorSelection && Objects.equals(expReply, actual)) {
-                    found = true;
-                } else if (expReply instanceof Class && ((Class<?>) expReply).isInstance(actual)) {
-                    found = true;
-                }
-
-                if (found) {
-                    iter.remove();
-                    break;
-                }
-            }
-
-            if (!found) {
-                fail(String.format("No cohort Future response found for %s. Actual: %s", expReply, futureResults));
-            }
-        }
-    }
-}
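
The deleted AbstractTransactionProxyTest relied heavily on Mockito's argThat() with hand-written ArgumentMatcher classes to match request messages by content (eqCreateTransaction, eqReadData, eqDataExists). For reference, a self-contained sketch of that matcher pattern with a placeholder message type:

// Sketch only: ReadRequest is a placeholder for the real protocol message classes.
import static org.mockito.ArgumentMatchers.argThat;

import org.mockito.ArgumentMatcher;

final class MatcherSketch {
    record ReadRequest(String path) {
    }

    static ReadRequest eqReadRequest(final String expectedPath) {
        class ReadRequestMatcher implements ArgumentMatcher<ReadRequest> {
            @Override
            public boolean matches(final ReadRequest argument) {
                return argument.path().equals(expectedPath);
            }
        }
        // Registers the matcher for use inside when()/verify() stubbing.
        return argThat(new ReadRequestMatcher());
    }
}
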
index a71d99e055380d4acfaf1290d4d4f444ecd91eec..9ce9cc743bc1596bf3237496871af0907b8c0b11 100644
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertFalse;
-import static org.mockito.ArgumentMatchers.anyCollection;
+import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
@@ -27,7 +27,7 @@ import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class DataTreeChangeListenerActorTest extends AbstractActorTest {
     private TestKit testKit;
@@ -68,7 +68,7 @@ public class DataTreeChangeListenerActorTest extends AbstractActorTest {
 
         testKit.within(Duration.ofSeconds(1), () -> {
             testKit.expectNoMessage();
-            verify(mockListener, never()).onDataTreeChanged(anyCollection());
+            verify(mockListener, never()).onDataTreeChanged(anyList());
             return null;
         });
     }
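
The matcher change above narrows the never()-interaction check from anyCollection() to anyList(), matching the listener's List-based onDataTreeChanged() signature. A small sketch of the same verification against a stand-in listener interface:

// Sketch only: Listener is a local stand-in for DOMDataTreeChangeListener.
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import java.util.List;

final class NeverNotifiedSketch {
    interface Listener {
        void onDataTreeChanged(List<Object> changes);
    }

    static void assertNotNotified(final Listener mockListener) {
        // No notification should have been delivered to the mocked listener.
        verify(mockListener, never()).onDataTreeChanged(anyList());
    }
}
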
index 373d4d7188b155a6e6da9ba0a861cc7cf576da1f..b0d38fba4750567b074aaff1ac56e4b7a483a3f1 100644
@@ -9,10 +9,11 @@ package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
@@ -27,9 +28,14 @@ import akka.util.Timeout;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.time.Duration;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.junit.Test;
-import org.mockito.stubbing.Answer;
+import org.mockito.ArgumentCaptor;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
@@ -42,37 +48,30 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNo
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.concurrent.ExecutionContextExecutor;
-import scala.concurrent.Future;
 
 public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     private final DOMDataTreeChangeListener mockListener = mock(DOMDataTreeChangeListener.class);
 
     @Test(timeout = 10000)
     public void testSuccessfulRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, false);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardFound(kit.getRef()));
 
-        RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
-            RegisterDataTreeChangeListener.class);
-        assertEquals("getPath", path, registerMsg.getPath());
-        assertFalse("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+        final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+        assertEquals(path, registerMsg.getPath());
+        assertFalse(registerMsg.isRegisterOnAllInstances());
 
         kit.reply(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
 
@@ -80,8 +79,7 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
         }
 
-        assertEquals("getListenerRegistrationActor", getSystem().actorSelection(kit.getRef().path()),
-            proxy.getListenerRegistrationActor());
+        assertEquals(getSystem().actorSelection(kit.getRef().path()), proxy.getListenerRegistrationActor());
 
         kit.watch(proxy.getDataChangeListenerActor());
 
@@ -100,48 +98,38 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test(timeout = 10000)
     public void testSuccessfulRegistrationForClusteredListener() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
-            ClusteredDOMDataTreeChangeListener.class);
-
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> proxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, true);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardFound(kit.getRef()));
 
-        RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
-            RegisterDataTreeChangeListener.class);
-        assertEquals("getPath", path, registerMsg.getPath());
-        assertTrue("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+        final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+        assertEquals(path, registerMsg.getPath());
+        assertTrue(registerMsg.isRegisterOnAllInstances());
 
         proxy.close();
     }
 
     @Test(timeout = 10000)
     public void testLocalShardNotFound() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
-
-        new Thread(() -> proxy.init("shard-1")).start();
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, true);
 
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardNotFound("shard-1"));
 
@@ -152,19 +140,16 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test(timeout = 10000)
     public void testLocalShardNotInitialized() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, false);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new NotInitializedException("not initialized"));
 
@@ -178,43 +163,35 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test
     public void testFailedRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorSystem mockActorSystem = mock(ActorSystem.class);
+        final var kit = new TestKit(getSystem());
+        final var mockActorSystem = mock(ActorSystem.class);
 
-        ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
+        final var mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
         doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
-        ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
+        final var executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
 
-        ActorUtils actorUtils = mock(ActorUtils.class);
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var actorUtils = mock(ActorUtils.class);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
 
         doReturn(executor).when(actorUtils).getClientDispatcher();
         doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
         doReturn(mockActorSystem).when(actorUtils).getActorSystem();
 
-        String shardName = "shard-1";
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
-
         doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
         doReturn(Futures.failed(new RuntimeException("mock"))).when(actorUtils).executeOperationAsync(
             any(ActorRef.class), any(Object.class), any(Timeout.class));
-        doReturn(mock(DatastoreContext.class)).when(actorUtils).getDatastoreContext();
-
-        proxy.init("shard-1");
 
-        assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+        final var proxy = DataTreeChangeListenerProxy.of(actorUtils, mockListener, path, true, "shard-1");
+        assertNull(proxy.getListenerRegistrationActor());
 
         proxy.close();
     }
 
     @Test
     public void testCloseBeforeRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = mock(ActorUtils.class);
-
-        String shardName = "shard-1";
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = mock(ActorUtils.class);
 
         doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
         doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorUtils).getClientDispatcher();
@@ -223,23 +200,46 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
         doReturn(getSystem().actorSelection(kit.getRef().path())).when(actorUtils).actorSelection(
             kit.getRef().path());
         doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
 
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+        final var proxy = createProxy(actorUtils, YangInstanceIdentifier.of(TestModel.TEST_QNAME), true);
+        final var instance = proxy.getKey();
 
-        Answer<Future<Object>> answer = invocation -> {
-            proxy.close();
-            return Futures.successful((Object) new RegisterDataTreeNotificationListenerReply(kit.getRef()));
-        };
+        doAnswer(invocation -> {
+            instance.close();
+            return Futures.successful(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
+        }).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class), any(Timeout.class));
+        proxy.getValue().run();
 
-        doAnswer(answer).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class),
-            any(Timeout.class));
+        kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
 
-        proxy.init(shardName);
+        assertNull(instance.getListenerRegistrationActor());
+    }
 
-        kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
+    @NonNullByDefault
+    private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+            final boolean clustered) {
+        return startProxyAsync(actorUtils, path, clustered, Runnable::run);
+    }
+
+    @NonNullByDefault
+    private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+            final boolean clustered, final Consumer<Runnable> execute) {
+        final var proxy = createProxy(actorUtils, path, clustered);
+        final var thread = new Thread(proxy.getValue());
+        thread.setDaemon(true);
+        thread.start();
+        return proxy.getKey();
+    }
 
-        assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+    @NonNullByDefault
+    private Entry<DataTreeChangeListenerProxy, Runnable> createProxy(final ActorUtils actorUtils,
+            final YangInstanceIdentifier path, final boolean clustered) {
+        final var executor = mock(Executor.class);
+        final var captor = ArgumentCaptor.forClass(Runnable.class);
+        doNothing().when(executor).execute(captor.capture());
+        final var proxy = DataTreeChangeListenerProxy.ofTesting(actorUtils, mockListener, path, clustered, "shard-1",
+            executor);
+        return Map.entry(proxy, captor.getValue());
     }
 }
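
The new createProxy() helper above never runs the registration task directly: it hands DataTreeChangeListenerProxy.ofTesting() a mocked Executor and captures whatever Runnable the proxy submits, so each test decides when (and on which thread) the registration step runs. Note that the Consumer<Runnable> parameter of the second startProxyAsync overload is not used by its body; the helper always starts a daemon thread. A minimal, self-contained sketch of the capture idiom, assuming Mockito on the test classpath (the class and method names below are illustrative, not part of the change):

    // Sketch of the executor-capture idiom used by createProxy() above. The mocked
    // Executor swallows execute() calls and records the submitted Runnable, which
    // the caller then runs whenever and wherever it chooses.
    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.mock;

    import java.util.concurrent.Executor;
    import org.mockito.ArgumentCaptor;

    final class ExecutorCaptureSketch {
        static Runnable captureSubmittedTask(final Runnable whatTheProxyWouldSubmit) {
            final var executor = mock(Executor.class);
            final var captor = ArgumentCaptor.forClass(Runnable.class);
            doNothing().when(executor).execute(captor.capture());

            executor.execute(whatTheProxyWouldSubmit); // stubbed: recorded, not run
            return captor.getValue();                  // run it inline or on a daemon thread
        }
    }
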
index c7f6b285cbdc06de4279040749300d2a34fb9cfa..88653642d26d423188fd41309714c36fdbe498e1 100644 (file)
@@ -40,8 +40,9 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNo
 import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
 import scala.concurrent.Await;
 import scala.concurrent.duration.FiniteDuration;
 
@@ -77,7 +78,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
 
     @Test
     public void testInitialChangeListenerEventWithContainerPath() throws DataValidationFailedException {
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
 
         Entry<MockDataTreeChangeListener, ActorSelection> entry = registerChangeListener(TEST_PATH, 1);
         MockDataTreeChangeListener listener = entry.getKey();
@@ -87,7 +90,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
 
         listener.reset(1);
 
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
         listener.waitForChangeEvents();
         listener.verifyNotifiedData(TEST_PATH);
 
@@ -96,7 +101,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
         entry.getValue().tell(CloseDataTreeNotificationListenerRegistration.getInstance(), kit.getRef());
         kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistrationReply.class);
 
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
         listener.verifyNoNotifiedData(TEST_PATH);
     }
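
The three hunks above replace the removed ImmutableNodes.containerNode(TEST_QNAME) shorthand with the builder from org.opendaylight.yangtools.yang.data.spi.node. Since the same empty container is built three times, a small local helper could keep the call sites terse; this is only an illustrative sketch using the API already imported above, with the helper name hypothetical and the ContainerNode/QName imports assumed:

    // Hypothetical helper mirroring the inline builder calls above: builds an
    // empty container node for the given QName, e.g.
    //   writeToStore(shard.getDataStore(), TEST_PATH, emptyContainer(TEST_QNAME));
    private static ContainerNode emptyContainer(final QName qname) {
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(qname))
            .build();
    }
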
 
index 5db3f39b6efe40ae3255a2542472aaecb883bc8b..7a3e2683921c3a6d99f04ea5ef41e5fe34240de4 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
@@ -45,8 +46,8 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 
 /**
@@ -56,7 +57,7 @@ import scala.concurrent.Await;
  */
 public class DataTreeCohortActorTest extends AbstractActorTest {
     private static final Collection<DOMDataTreeCandidate> CANDIDATES = new ArrayList<>();
-    private static final SchemaContext MOCK_SCHEMA = mock(SchemaContext.class);
+    private static final EffectiveModelContext MOCK_SCHEMA = mock(EffectiveModelContext.class);
     private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
     private final DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
     private final PostCanCommitStep mockPostCanCommit = mock(PostCanCommitStep.class);
@@ -108,13 +109,12 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
         askAndAwait(cohortActor, new Commit(txId2));
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testAsyncCohort() throws Exception {
         ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
 
         doReturn(executeWithDelay(executor, mockPostCanCommit))
-                .when(mockCohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+                .when(mockCohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
 
         doReturn(executor.submit(() -> mockPostPreCommit)).when(mockPostCanCommit).preCommit();
 
@@ -135,13 +135,12 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
         executor.shutdownNow();
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testFailureOnCanCommit() throws Exception {
-        DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.empty(),
+        DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.of(),
                 "mock");
         doReturn(FluentFutures.immediateFailedFluentFuture(failure)).when(mockCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
         ActorRef cohortActor = newCohortActor("testFailureOnCanCommit");
 
@@ -196,16 +195,15 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
     }
 
     private ActorRef newCohortActor(final String name) {
-        return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.empty()), name);
+        return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.of()), name);
     }
 
-    @SuppressWarnings("unchecked")
     private void resetMockCohort() {
         reset(mockCohort);
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostCanCommit).abort();
         doReturn(Futures.immediateFuture(mockPostPreCommit)).when(mockPostCanCommit).preCommit();
         doReturn(FluentFutures.immediateFluentFuture(mockPostCanCommit)).when(mockCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostPreCommit).abort();
         doReturn(Futures.immediateFuture(null)).when(mockPostPreCommit).commit();
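
The removed @SuppressWarnings("unchecked") annotations were only needed because any(Collection.class) hands Mockito a raw Collection; anyCollection() is typed as Collection<?> (and the cohort contract now takes an EffectiveModelContext), so the stubs compile cleanly. A standalone sketch of the matcher difference, using a made-up single-method interface rather than the real DOMDataTreeCommitCohort:

    import static org.mockito.ArgumentMatchers.anyCollection;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.util.Collection;

    // Made-up stand-in for a cohort-style callback taking a collection argument.
    interface CollectionCallback {
        String onCandidates(Collection<?> candidates);
    }

    final class MatcherSketch {
        static void stubIt() {
            final var callback = mock(CollectionCallback.class);
            // anyCollection() already carries the Collection<?> type, so no raw-type
            // conversion and no @SuppressWarnings("unchecked") on the test method.
            doReturn("ok").when(callback).onCandidates(anyCollection());
        }
    }
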
index 97b5f75f8f0fc5ff9c8f3cd2c0b2149e186c4ee5..7eb534c334e3b3afb8811a3216994c8e7994ff53 100644 (file)
@@ -12,6 +12,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
@@ -27,7 +28,6 @@ import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.FluentFuture;
 import com.typesafe.config.ConfigFactory;
 import java.util.Collection;
-import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import org.junit.AfterClass;
@@ -35,6 +35,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.DataValidationFailedException;
@@ -47,16 +48,14 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public class DataTreeCohortIntegrationTest {
 
@@ -93,36 +92,36 @@ public class DataTreeCohortIntegrationTest {
     @SuppressWarnings({ "unchecked", "rawtypes" })
     @Test
     public void testSuccessfulCanCommitWithNoopPostStep() throws Exception {
-        final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+        final var cohort = mock(DOMDataTreeCommitCohort.class);
         doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
         ArgumentCaptor<Collection> candidateCapt = ArgumentCaptor.forClass(Collection.class);
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
 
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep", "test-1")) {
-            final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(TEST_ID,
-                    cohort);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep",
+            "test-1")) {
+
+            final var cohortReg = dataStore.registerCommitCohort(TEST_ID, cohort);
             assertNotNull(cohortReg);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
                 state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
 
-            final ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+            final var node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
             kit.testWriteTransaction(dataStore, TestModel.TEST_PATH, node);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapt.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapt.capture());
             assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapt.getValue().iterator().next(), TEST_ID,
-                    ModificationType.WRITE, Optional.of(node), Optional.empty());
+                    ModificationType.WRITE, node, null);
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             kit.testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
                     ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
                     .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 42))
                     .build());
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
 
             cohortReg.close();
 
@@ -134,17 +133,15 @@ public class DataTreeCohortIntegrationTest {
         }
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testFailedCanCommit() throws Exception {
-        final DOMDataTreeCommitCohort failedCohort = mock(DOMDataTreeCommitCohort.class);
+        final var failedCohort = mock(DOMDataTreeCommitCohort.class);
 
         doReturn(FAILED_CAN_COMMIT_FUTURE).when(failedCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testFailedCanCommit", "test-1")) {
+        final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testFailedCanCommit", "test-1")) {
             dataStore.registerCommitCohort(TEST_ID, failedCohort);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
@@ -165,14 +162,15 @@ public class DataTreeCohortIntegrationTest {
     @SuppressWarnings({ "unchecked", "rawtypes" })
     @Test
     public void testCanCommitWithListEntries() throws Exception {
-        final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+        final var cohort = mock(DOMDataTreeCommitCohort.class);
         doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+                any(EffectiveModelContext.class), anyCollection());
+        final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
 
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testCanCommitWithMultipleListEntries", "cars-1")) {
-            final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testCanCommitWithMultipleListEntries",
+            "cars-1")) {
+
+            final var cohortReg = dataStore.registerCommitCohort(
                     new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH
                             .node(CarsModel.CAR_QNAME)), cohort);
             assertNotNull(cohortReg);
@@ -197,10 +195,10 @@ public class DataTreeCohortIntegrationTest {
             kit.doCommit(writeTx.ready());
 
             ArgumentCaptor<Collection> candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
             assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapture.getValue().iterator().next(),
                     new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.WRITE,
-                    Optional.of(optimaNode), Optional.empty());
+                    optimaNode, null);
 
             // Write replace the cars container with 2 new car entries. The cohort should get invoked with 3
             // DOMDataTreeCandidates: once for each of the 2 new car entries (WRITE mod) and once for the deleted prior
@@ -208,7 +206,7 @@ public class DataTreeCohortIntegrationTest {
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             writeTx = dataStore.newWriteOnlyTransaction();
             final YangInstanceIdentifier sportagePath = CarsModel.newCarPath("sportage");
@@ -219,40 +217,40 @@ public class DataTreeCohortIntegrationTest {
             kit.doCommit(writeTx.ready());
 
             candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
 
             assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.WRITE,
-                    Optional.of(sportageNode), Optional.empty());
+                    sportageNode, null);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.WRITE,
-                    Optional.of(soulNode), Optional.empty());
+                    soulNode, null);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, optimaPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.DELETE,
-                    Optional.empty(), Optional.of(optimaNode));
+                    null, optimaNode);
 
             // Delete the cars container - cohort should be invoked for the 2 deleted car entries.
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             writeTx = dataStore.newWriteOnlyTransaction();
             writeTx.delete(CarsModel.BASE_PATH);
             kit.doCommit(writeTx.ready());
 
             candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
 
             assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.DELETE,
-                    Optional.empty(), Optional.of(sportageNode));
+                    null, sportageNode);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.DELETE,
-                    Optional.empty(), Optional.of(soulNode));
+                    null, soulNode);
 
         }
     }
@@ -275,29 +273,28 @@ public class DataTreeCohortIntegrationTest {
      * DataTreeCandidate) and since currently preCommit is a noop in the Shard backend (it is combined with commit),
      * we can't actually test abort after canCommit.
      */
-    @SuppressWarnings("unchecked")
     @Test
     @Ignore
     public void testAbortAfterCanCommit() throws Exception {
-        final DOMDataTreeCommitCohort cohortToAbort = mock(DOMDataTreeCommitCohort.class);
-        final PostCanCommitStep stepToAbort = mock(PostCanCommitStep.class);
+        final var cohortToAbort = mock(DOMDataTreeCommitCohort.class);
+        final var stepToAbort = mock(PostCanCommitStep.class);
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(stepToAbort).abort();
         doReturn(PostPreCommitStep.NOOP_FUTURE).when(stepToAbort).preCommit();
         doReturn(FluentFutures.immediateFluentFuture(stepToAbort)).when(cohortToAbort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testAbortAfterCanCommit", "test-1", "cars-1")) {
+        var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testAbortAfterCanCommit",
+                "test-1", "cars-1")) {
             dataStore.registerCommitCohort(TEST_ID, cohortToAbort);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
                 state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
 
-            DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+            var writeTx = dataStore.newWriteOnlyTransaction();
             writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
-            DOMStoreThreePhaseCommitCohort dsCohort = writeTx.ready();
+            var dsCohort = writeTx.ready();
 
             dsCohort.canCommit().get(5, TimeUnit.SECONDS);
             dsCohort.preCommit().get(5, TimeUnit.SECONDS);
@@ -308,20 +305,11 @@ public class DataTreeCohortIntegrationTest {
 
     private static void assertDataTreeCandidate(final DOMDataTreeCandidate candidate,
             final DOMDataTreeIdentifier expTreeId, final ModificationType expType,
-            final Optional<NormalizedNode> expDataAfter, final Optional<NormalizedNode> expDataBefore) {
+            final NormalizedNode expDataAfter, final NormalizedNode expDataBefore) {
         assertNotNull("Expected candidate for path " + expTreeId.getRootIdentifier(), candidate);
         assertEquals("rootPath", expTreeId, candidate.getRootPath());
-        assertEquals("modificationType", expType, candidate.getRootNode().getModificationType());
-
-        assertEquals("dataAfter present", expDataAfter.isPresent(), candidate.getRootNode().getDataAfter().isPresent());
-        if (expDataAfter.isPresent()) {
-            assertEquals("dataAfter", expDataAfter.get(), candidate.getRootNode().getDataAfter().get());
-        }
-
-        assertEquals("dataBefore present", expDataBefore.isPresent(),
-                candidate.getRootNode().getDataBefore().isPresent());
-        if (expDataBefore.isPresent()) {
-            assertEquals("dataBefore", expDataBefore.get(), candidate.getRootNode().getDataBefore().get());
-        }
+        assertEquals("modificationType", expType, candidate.getRootNode().modificationType());
+        assertEquals("dataAfter", expDataAfter, candidate.getRootNode().dataAfter());
+        assertEquals("dataBefore", expDataBefore, candidate.getRootNode().dataBefore());
     }
 }
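
assertDataTreeCandidate() now takes plain, nullable NormalizedNode arguments instead of Optionals, matching the dataBefore()/dataAfter() accessors on the candidate's root node: null simply means there is no before- or after-image. Two illustrative call shapes, reusing names from the hunks above:

    // A freshly written entry: data after the commit, nothing before it.
    assertDataTreeCandidate(candidate, TEST_ID, ModificationType.WRITE, node, null);
    // A deletion is the mirror image: data before, nothing after.
    assertDataTreeCandidate(candidate, TEST_ID, ModificationType.DELETE, null, node);
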
index a113856627e7ca0ddf70c6cc72ac1d747e678478..244df9c0bc0973d541e7cf0baf742d2b111df019 100644 (file)
@@ -88,8 +88,7 @@ public class DatastoreContextContextPropertiesUpdaterTest {
         return currProps.get(obj);
     }
 
-    private class DummyListenerImpl implements Listener {
-
+    private static final class DummyListenerImpl implements Listener {
         private DatastoreContextFactory contextFactory;
 
         @Override
index 924f1de1ff4730627fdb4e19717666c3f083279b..e271c98c63355f7e70ed752ef3631a3e879aeee0 100644 (file)
@@ -25,8 +25,8 @@ import java.util.Map;
 import org.junit.Test;
 import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
 import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
 
 /**
  * Unit tests for DatastoreContextIntrospector.
index 50bd7909367ff9ea64c61077e776fdebcf2c0b91..611da694c3279812471fbe2c4b44207312d374aa 100644 (file)
@@ -34,7 +34,7 @@ import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEF
 import java.util.concurrent.TimeUnit;
 import org.junit.Assert;
 import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 
 public class DatastoreContextTest {
 
index 2621d53838685afda71b6fdb995a744903671088..9b13193b15da96b2633ec71a3cff8838780cf116 100644 (file)
@@ -35,10 +35,10 @@ import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 /**
  * Unit tests for DatastoreSnapshotRestore.
@@ -130,7 +130,7 @@ public class DatastoreSnapshotRestoreTest {
         DataTree dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
             SchemaContextHelper.full());
         AbstractShardTest.writeToStore(dataTree, path, node);
-        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.empty());
+        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         return Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
                 Collections.<ReplicatedLogEntry>emptyList(), 2, 1, 2, 1, 1, "member-1", null);
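
The YangInstanceIdentifier.empty() factory used here and in the other hunks has been replaced by the no-argument of(), which denotes the conceptual root of the data tree; of(QName) still builds a one-step path as before. A short sketch of the two forms, with TestModel.TEST_QNAME reused from the tests above:

    // Root identifier: reading it returns the whole data tree, as in readStore() above.
    final var root = YangInstanceIdentifier.of();
    // One-step path under the root, unchanged usage.
    final var test = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
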
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java
deleted file mode 100644 (file)
index 16dc540..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertSame;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.same;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import scala.concurrent.Future;
-
-/**
- * Unit tests for DebugThreePhaseCommitCohort.
- *
- * @author Thomas Pantelis
- */
-public class DebugThreePhaseCommitCohortTest {
-    private final TransactionIdentifier transactionId = MockIdentifiers.transactionIdentifier(
-        DebugThreePhaseCommitCohortTest.class, "mock");
-
-    @Test
-    public void test() {
-        AbstractThreePhaseCommitCohort<?> mockDelegate = mock(AbstractThreePhaseCommitCohort.class);
-        Exception failure = new Exception("mock failure");
-        ListenableFuture<Object> expFailedFuture = Futures.immediateFailedFuture(failure);
-        doReturn(expFailedFuture).when(mockDelegate).canCommit();
-        doReturn(expFailedFuture).when(mockDelegate).preCommit();
-        doReturn(expFailedFuture).when(mockDelegate).commit();
-
-        ListenableFuture<Object> expAbortFuture = Futures.immediateFuture(null);
-        doReturn(expAbortFuture).when(mockDelegate).abort();
-
-        List<Future<Object>> expCohortFutures = new ArrayList<>();
-        doReturn(expCohortFutures).when(mockDelegate).getCohortFutures();
-
-        Throwable debugContext = new RuntimeException("mock");
-        DebugThreePhaseCommitCohort cohort = new DebugThreePhaseCommitCohort(transactionId, mockDelegate, debugContext);
-
-        Logger mockLogger = mock(Logger.class);
-        cohort.setLogger(mockLogger);
-
-        assertSame("canCommit", expFailedFuture, cohort.canCommit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        reset(mockLogger);
-        assertSame("preCommit", expFailedFuture, cohort.preCommit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        reset(mockLogger);
-        assertSame("commit", expFailedFuture, cohort.commit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        assertSame("abort", expAbortFuture, cohort.abort());
-
-        assertSame("getCohortFutures", expCohortFutures, cohort.getCohortFutures());
-
-        reset(mockLogger);
-        ListenableFuture<Boolean> expSuccessFuture = Futures.immediateFuture(Boolean.TRUE);
-        doReturn(expSuccessFuture).when(mockDelegate).canCommit();
-
-        assertSame("canCommit", expSuccessFuture, cohort.canCommit());
-        verify(mockLogger, never()).warn(anyString(), any(TransactionIdentifier.class), any(Throwable.class),
-                any(Throwable.class));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapperTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DelayedTransactionContextWrapperTest.java
deleted file mode 100644 (file)
index 0d0b03c..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-@RunWith(MockitoJUnitRunner.StrictStubs.class)
-public class DelayedTransactionContextWrapperTest {
-    @Mock
-    private ActorUtils actorUtils;
-
-    @Mock
-    private TransactionContext transactionContext;
-
-    private DelayedTransactionContextWrapper transactionContextWrapper;
-
-    @Before
-    public void setUp() {
-        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
-        transactionContextWrapper = new DelayedTransactionContextWrapper(MockIdentifiers.transactionIdentifier(
-            DelayedTransactionContextWrapperTest.class, "mock"), actorUtils, "mock");
-    }
-
-    @Test
-    public void testExecutePriorTransactionOperations() {
-        for (int i = 0; i < 100; i++) {
-            transactionContextWrapper.maybeExecuteTransactionOperation(mock(TransactionOperation.class));
-        }
-        assertEquals(901, transactionContextWrapper.getLimiter().availablePermits());
-
-        transactionContextWrapper.executePriorTransactionOperations(transactionContext);
-
-        assertEquals(1001, transactionContextWrapper.getLimiter().availablePermits());
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapperTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DirectTransactionContextWrapperTest.java
deleted file mode 100644 (file)
index 44f246f..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-@RunWith(MockitoJUnitRunner.StrictStubs.class)
-public class DirectTransactionContextWrapperTest {
-    @Mock
-    private ActorUtils actorUtils;
-
-    @Mock
-    private TransactionContext transactionContext;
-
-    @Mock
-    private TransactionOperation transactionOperation;
-
-    private DirectTransactionContextWrapper contextWrapper;
-
-    @Before
-    public void setUp() {
-        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
-        contextWrapper = new DirectTransactionContextWrapper(MockIdentifiers.transactionIdentifier(
-                DirectTransactionContextWrapperTest.class, "mock"), actorUtils, "mock",
-                transactionContext);
-    }
-
-    @Test
-    public void testMaybeExecuteTransactionOperation() {
-        contextWrapper.maybeExecuteTransactionOperation(transactionOperation);
-        verify(transactionOperation, times(1)).invoke(transactionContext, null);
-    }
-}
index eb6ab931288bc66d7aba499bdc7da5161355ab7c..6815b2d367a96a7bcdba3b94ca927e133d7b9cd6 100644 (file)
@@ -55,7 +55,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { TestDistributedDataStore.class }, { TestClientBackedDataStore.class }
+            { TestClientBackedDataStore.class }
         });
     }
 
@@ -87,9 +87,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
         final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
         InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the write Tx
             final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction()
                     : dataStore.newReadWriteTransaction();
@@ -183,9 +181,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
         final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
         InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
@@ -251,7 +247,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
 
         InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
 
-        final AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName);
+        final var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName);
 
         // Create the write Tx
         final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
@@ -317,8 +313,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
 
         InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
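
Throughout this test the parameterized setupAbstractDataStore(...) calls collapse into setupDataStore(...), which now yields a ClientBackedDataStore and is used in try-with-resources so each datastore is closed even when an assertion fails. The condensed shape, with names as they appear in the hunks above:

    try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
        final var readWriteTx = dataStore.newReadWriteTransaction();
        assertNotNull("newReadWriteTransaction returned null", readWriteTx);
        // ... exercise the transaction against the single shard ...
    }
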
index 323e9d6c5a8b59f7fc29094b01d51c6ee732d5dc..91c00f7eb153c04a6fc64f6782a7349dab80e2f3 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.awaitility.Awaitility.await;
-import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.equalTo;
@@ -18,9 +17,9 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
@@ -39,6 +38,7 @@ import akka.testkit.javadsl.TestKit;
 import com.google.common.base.Stopwatch;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
@@ -48,6 +48,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -72,8 +73,6 @@ import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
 import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
 import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
 import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
@@ -106,7 +105,6 @@ import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
@@ -115,18 +113,17 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import scala.collection.Set;
 import scala.concurrent.Await;
@@ -144,12 +141,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { TestDistributedDataStore.class, 7 }, { TestClientBackedDataStore.class, 12 }
+                { TestClientBackedDataStore.class, 12 }
         });
     }
 
     @Parameter(0)
-    public Class<? extends AbstractDataStore> testParameter;
+    public Class<? extends ClientBackedDataStore> testParameter;
     @Parameter(1)
     public int commitTimeout;
 
@@ -179,8 +176,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     private final TransactionIdentifier tx1 = nextTransactionId();
     private final TransactionIdentifier tx2 = nextTransactionId();
 
-    private AbstractDataStore followerDistributedDataStore;
-    private AbstractDataStore leaderDistributedDataStore;
+    private ClientBackedDataStore followerDistributedDataStore;
+    private ClientBackedDataStore leaderDistributedDataStore;
     private IntegrationTestKit followerTestKit;
     private IntegrationTestKit leaderTestKit;
 
@@ -202,7 +199,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @After
     public void tearDown() {
         if (followerDistributedDataStore != null) {
-            leaderDistributedDataStore.close();
+            followerDistributedDataStore.close();
         }
         if (leaderDistributedDataStore != null) {
             leaderDistributedDataStore.close();
@@ -235,11 +232,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                     throws Exception {
         leaderTestKit = new IntegrationTestKit(leaderSystem, leaderBuilder, commitTimeout);
 
-        leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
-                testParameter, type, moduleShardsConfig, false, shards);
+        leaderDistributedDataStore = leaderTestKit.setupDataStore(testParameter, type, moduleShardsConfig, false,
+            shards);
 
         followerTestKit = new IntegrationTestKit(followerSystem, followerBuilder, commitTimeout);
-        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
+        followerDistributedDataStore = followerTestKit.setupDataStore(
                 testParameter, type, moduleShardsConfig, false, shards);
 
         leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
@@ -250,16 +247,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
             throws Exception {
-        final Optional<NormalizedNode> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-
-        final CollectionNodeBuilder<MapEntryNode, SystemMapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
-                CarsModel.CAR_QNAME);
-        for (final NormalizedNode entry: entries) {
-            listBuilder.withChild((MapEntryNode) entry);
-        }
-
-        assertEquals("Car list node", listBuilder.build(), optional.get());
+        assertEquals("Car list node",
+            Optional.of(ImmutableNodes.mapNodeBuilder(CarsModel.CAR_QNAME).withValue(Arrays.asList(entries)).build()),
+            readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS));
     }
 
     private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
@@ -351,9 +341,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
 
-        try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
-                commitTimeout)
-                .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
+        try (var member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder, commitTimeout)
+                .setupDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
             verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
         }
     }
@@ -382,35 +371,24 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         // wait to let the shard catch up with purged
         await("Range set leak test").atMost(5, TimeUnit.SECONDS)
-                .pollInterval(500, TimeUnit.MILLISECONDS)
-                .untilAsserted(() -> {
-                    final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars")
-                        .orElseThrow();
-                    final var frontendMetadata =
-                        (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
-                            .executeOperation(localShard, new RequestFrontendMetadata());
-
-                    final var clientMeta = frontendMetadata.getClients().get(0);
-                    if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
-                        assertTellClientMetadata(clientMeta, numCars * 2);
-                    } else {
-                        assertAskClientMetadata(clientMeta);
-                    }
-                });
+            .pollInterval(500, TimeUnit.MILLISECONDS)
+            .untilAsserted(() -> {
+                final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+                final var frontendMetadata =
+                    (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+                    .executeOperation(localShard, new RequestFrontendMetadata());
+
+                assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+            });
 
         try (var tx = txChain.newReadOnlyTransaction()) {
-            final var body = tx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS).orElseThrow().body();
-            assertThat(body, instanceOf(Collection.class));
+            final var body = assertInstanceOf(Collection.class,
+                tx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS).orElseThrow().body());
             assertEquals(numCars, ((Collection<?>) body).size());
         }
     }
 
-    private static void assertAskClientMetadata(final FrontendClientMetadata clientMeta) {
-        // ask based should track no metadata
-        assertEquals(List.of(), clientMeta.getCurrentHistories());
-    }
-
-    private static void assertTellClientMetadata(final FrontendClientMetadata clientMeta, final long lastPurged) {
+    private static void assertClientMetadata(final FrontendClientMetadata clientMeta, final long lastPurged) {
         final var iterator = clientMeta.getCurrentHistories().iterator();
         var metadata = iterator.next();
         while (iterator.hasNext() && metadata.getHistoryId() != 1) {
@@ -423,19 +401,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test
     public void testCloseTransactionMetadataLeak() throws Exception {
-        // FIXME: CONTROLLER-2016: ask-based frontend triggers this:
-        //
-        // java.lang.IllegalStateException: Previous transaction
-        //            member-2-datastore-testCloseTransactionMetadataLeak-fe-0-chn-1-txn-1-0 is not ready yet
-        //        at org.opendaylight.controller.cluster.datastore.TransactionChainProxy$Allocated.checkReady()
-        //        at org.opendaylight.controller.cluster.datastore.TransactionChainProxy.newReadOnlyTransaction()
-        assumeTrue(testParameter.isAssignableFrom(ClientBackedDataStore.class));
-
         initDatastoresWithCars("testCloseTransactionMetadataLeak");
 
-        final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
+        final var txChain = followerDistributedDataStore.createTransactionChain();
 
-        DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+        var writeTx = txChain.newWriteOnlyTransaction();
         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
         followerTestKit.doCommit(writeTx.ready());
@@ -453,21 +423,15 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         // wait to let the shard catch up with purged
         await("wait for purges to settle").atMost(5, TimeUnit.SECONDS)
-                .pollInterval(500, TimeUnit.MILLISECONDS)
-                .untilAsserted(() -> {
-                    final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars")
-                        .orElseThrow();
-                    final var frontendMetadata =
-                            (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
-                                    .executeOperation(localShard, new RequestFrontendMetadata());
-
-                    final var clientMeta = frontendMetadata.getClients().get(0);
-                    if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
-                        assertTellClientMetadata(clientMeta, numCars * 2);
-                    } else {
-                        assertAskClientMetadata(clientMeta);
-                    }
-                });
+            .pollInterval(500, TimeUnit.MILLISECONDS)
+            .untilAsserted(() -> {
+                final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+                final var frontendMetadata =
+                    (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+                    .executeOperation(localShard, new RequestFrontendMetadata());
+
+                assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+            });
     }
 
     @Test
@@ -649,22 +613,21 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
                         MoreExecutors.directExecutor());
 
-        final DOMTransactionChainListener listener = mock(DOMTransactionChainListener.class);
-        final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+        final var listener = mock(FutureCallback.class);
+        final DOMTransactionChain txChain = broker.createTransactionChain();
+        txChain.addCallback(listener);
 
         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+            .build());
 
-        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+        final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
+        assertInstanceOf(TransactionCommitFailedException.class, ex.getCause());
 
-        final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS))
-            .getCause();
-        assertThat(ex, instanceOf(TransactionCommitFailedException.class));
-
-        verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
+        verify(listener, timeout(5000)).onFailure(any());
 
         txChain.close();
         broker.close();
@@ -674,34 +637,32 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
         initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
 
-        final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
-                ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
-                        LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
-                        MoreExecutors.directExecutor());
+        try (var broker = new ConcurrentDOMDataBroker(
+            Map.of(LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore), MoreExecutors.directExecutor())) {
 
-        final DOMTransactionChainListener listener = mock(DOMTransactionChainListener.class);
-        final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            final var listener = mock(FutureCallback.class);
+            final DOMTransactionChain txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
-        final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+            final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
 
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
+            writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            // Note that merge will validate the data and fail, but put succeeds because deep validation is not
+            // done for put, for performance reasons.
+            writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                .build());
 
-        // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
-        // done for put for performance reasons.
-        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+            final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS))
+                .getCause();
+            assertThat(ex, instanceOf(TransactionCommitFailedException.class));
 
-        final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS))
-            .getCause();
-        assertThat(ex, instanceOf(TransactionCommitFailedException.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
-        verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
-
-        txChain.close();
-        broker.close();
+            txChain.close();
+        }
     }
 
     @Test
@@ -741,9 +702,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
         IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
 
-        try (AbstractDataStore ds =
-                newMember1TestKit.setupAbstractDataStore(
-                        testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
+        try (var ds = newMember1TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false,
+            CARS)) {
 
             followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
 
@@ -761,7 +721,6 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         }
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testReadyLocalTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
@@ -786,7 +745,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());
 
-        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
         Object resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -805,7 +764,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());
 
-        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
         resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -826,7 +785,6 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
@@ -836,7 +794,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
 
-        carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
         final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
 
         // Send a tx with immediate commit.
@@ -852,7 +810,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx1, modification),
             true, Optional.empty());
 
-        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
         Object resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -872,7 +830,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx2, modification),
             false, Optional.empty());
 
-        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
         resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -900,6 +858,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
         initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
 
+        // Verify backend statistics on start
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 0);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
         // Do an initial write to get the primary shard info cached.
 
         final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
@@ -939,10 +901,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
         final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
 
+        // At this point only the leader should see the transactions
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 2);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
         // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
-        // tx writes 5 cars so 2 BatchedModidifications messages will be sent initially and cached in the
-        // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModidifications will be
-        // sent on ready.
+        // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the leader shard
+        // (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be sent on ready.
 
         final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
         for (int i = 1; i <= 5; i++, carIndex++) {
@@ -950,25 +915,27 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
         }
 
-        // Prepare another WO that writes to a single shard. This will send a single BatchedModidifications
-        // message on ready.
+        // Prepare another WO that writes to a single shard. This will send a single BatchedModifications message
+        // on ready.
 
         final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
         cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
         writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
         carIndex++;
 
-        // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaciton message to the
-        // leader shard on ready.
+        // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the leader shard
+        // on ready.
 
         final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
         cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
-        readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
+        final YangInstanceIdentifier carPath = CarsModel.newCarPath("car" + carIndex);
+        readWriteTx.write(carPath, cars.getLast());
 
-        // FIXME: CONTROLLER-2017: ClientBackedDataStore reports only 4 transactions
-        assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
-        IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-            stats -> assertEquals("getReadWriteTransactionCount", 5, stats.getReadWriteTransactionCount()));
+        // There is a difference here between implementations: the tell-based protocol enforces batching at the
+        // per-transaction level, whereas the ask-based protocol has a global per-shard limit and hence flushes out
+        // the last two transactions eagerly.
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
 
         // Disable elections on the leader so it switches to follower.
 
@@ -1001,11 +968,22 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         followerTestKit.doCommit(writeTx4Cohort);
         followerTestKit.doCommit(rwTxCohort);
 
+        // At this point everything is committed and the follower datastore should see 5 transactions, but the
+        // leader should only see the initial transactions
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 5);
+
         DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
         verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
         verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
     }
 
+    private static void verifyCarsReadWriteTransactions(final ClientBackedDataStore datastore, final int expected)
+            throws Exception {
+        IntegrationTestKit.verifyShardStats(datastore, "cars",
+            stats -> assertEquals("getReadWriteTransactionCount", expected, stats.getReadWriteTransactionCount()));
+    }
+
     @Test
     public void testLeadershipTransferOnShutdown() throws Exception {
         leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
@@ -1016,8 +994,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
                 commitTimeout);
-        try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
-                testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
+        try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
 
             followerTestKit.waitForMembersUp("member-3");
             follower2TestKit.waitForMembersUp("member-1", "member-2");
@@ -1030,21 +1008,18 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
 
-            final var usesCohorts = DistributedDataStore.class.isAssignableFrom(testParameter);
-            if (usesCohorts) {
-                IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-                    stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
-            }
+            // FIXME: this assertion should be made in an explicit Shard test
+            //            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+            //                stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
 
             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
             final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             writeTx.write(CarsModel.newCarPath("optima"), car);
             final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
 
-            if (usesCohorts) {
-                IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-                    stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
-            }
+            // FIXME: this assertion should be made in an explicit Shard test
+            //            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+            //                stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
 
             // Gracefully stop the leader via a Shutdown message.
 
@@ -1109,47 +1084,32 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
 
         final var noShardLeaderCohort = noShardLeaderWriteTx.ready();
-        final ListenableFuture<Boolean> canCommit;
-
-        // There is difference in behavior here:
-        if (!leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
-            // ask-based canCommit() times out and aborts
-            final var ex = assertThrows(ExecutionException.class,
-                () -> leaderTestKit.doCommit(noShardLeaderCohort)).getCause();
-            assertThat(ex, instanceOf(NoShardLeaderException.class));
-            assertThat(ex.getMessage(), containsString(
-                "Shard member-1-shard-cars-testTransactionWithIsolatedLeader currently has no leader."));
-            canCommit = null;
-        } else {
-            // tell-based canCommit() does not have a real timeout and hence continues
-            canCommit = noShardLeaderCohort.canCommit();
-            Uninterruptibles.sleepUninterruptibly(commitTimeout, TimeUnit.SECONDS);
-            assertFalse(canCommit.isDone());
-        }
+        // tell-based canCommit() does not have a real timeout and hence continues
+        final var canCommit = noShardLeaderCohort.canCommit();
+        Uninterruptibles.sleepUninterruptibly(commitTimeout, TimeUnit.SECONDS);
+        assertFalse(canCommit.isDone());
 
         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                 .shardElectionTimeoutFactor(100));
 
         final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
 
-        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
-                testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
+        followerDistributedDataStore = followerTestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
 
         leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
         leaderTestKit.doCommit(successTxCohort);
 
-        // continuation of tell-based protocol: readied transaction will complete commit, but will report an OLFE
-        if (canCommit != null) {
-            final var ex = assertThrows(ExecutionException.class,
-                () -> canCommit.get(commitTimeout, TimeUnit.SECONDS)).getCause();
-            assertThat(ex, instanceOf(OptimisticLockFailedException.class));
-            assertEquals("Optimistic lock failed for path " + CarsModel.BASE_PATH, ex.getMessage());
-            final var cause = ex.getCause();
-            assertThat(cause, instanceOf(ConflictingModificationAppliedException.class));
-            final var cmae = (ConflictingModificationAppliedException) cause;
-            assertEquals("Node was created by other transaction.", cmae.getMessage());
-            assertEquals(CarsModel.BASE_PATH, cmae.getPath());
-        }
+        // continuation of canCommit(): the readied transaction will complete the commit, but will report an OLFE
+        final var ex = assertThrows(ExecutionException.class,
+            () -> canCommit.get(commitTimeout, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(OptimisticLockFailedException.class));
+        assertEquals("Optimistic lock failed for path " + CarsModel.BASE_PATH, ex.getMessage());
+        final var cause = ex.getCause();
+        assertThat(cause, instanceOf(ConflictingModificationAppliedException.class));
+        final var cmae = (ConflictingModificationAppliedException) cause;
+        assertEquals("Node was created by other transaction.", cmae.getMessage());
+        assertEquals(CarsModel.BASE_PATH, cmae.getPath());
     }
 
     @Test
@@ -1175,13 +1135,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
 
         final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
-        final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause());
-        if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
-            assertTrue(msg, Throwables.getRootCause(ex) instanceof NoShardLeaderException
-                || ex.getCause() instanceof ShardLeaderNotRespondingException);
-        } else {
-            assertThat(msg, Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
-        }
+        assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+            Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
     }
 
     @Test
@@ -1210,12 +1165,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
 
         final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
-        final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause());
-        if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
-            assertThat(msg, Throwables.getRootCause(ex), instanceOf(NoShardLeaderException.class));
-        } else {
-            assertThat(msg, Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
-        }
+        assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+            Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
     }
 
     @Test
@@ -1229,9 +1180,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
 
-        try (AbstractDataStore ds =
-                follower2TestKit.setupAbstractDataStore(
-                        testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
+        try (var ds = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
 
             followerTestKit.waitForMembersUp("member-1", "member-3");
             follower2TestKit.waitForMembersUp("member-1", "member-2");
@@ -1268,9 +1217,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
 
-        final AbstractDataStore ds2 =
-                     follower2TestKit.setupAbstractDataStore(
-                             testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
+        final var ds2 = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
 
         followerTestKit.waitForMembersUp("member-1", "member-3");
         follower2TestKit.waitForMembersUp("member-1", "member-2");
@@ -1278,7 +1225,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         // behavior is controlled by akka.coordinated-shutdown.run-by-actor-system-terminate configuration option
         TestKit.shutdownActorSystem(follower2System, true);
 
-        ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
+        ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
         final OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
 
@@ -1293,7 +1240,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         await().atMost(10, TimeUnit.SECONDS)
                 .until(() -> containsUnreachable(followerCluster, follower2Member));
 
-        ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").get();
+        ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
 
         // to simulate a follower not being able to receive messages, but still being able to send messages and becoming
         // candidate, we can just send a couple of RequestVotes to both leader and follower.
@@ -1336,7 +1283,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
         AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
 
-        final NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.empty());
+        final NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.of());
         final Snapshot initialSnapshot = Snapshot.create(
                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
                 Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
@@ -1359,9 +1306,6 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test
     public void testReadWriteMessageSlicing() throws Exception {
-        // The slicing is only implemented for tell-based protocol
-        assumeTrue(ClientBackedDataStore.class.isAssignableFrom(testParameter));
-
         leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
         followerDatastoreContextBuilder.maximumMessageSliceSize(100);
         initDatastoresWithCars("testLargeReadReplySlicing");
@@ -1391,15 +1335,15 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
         leaderTestKit.doCommit(initialWriteTx.ready());
 
-        try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
-                testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false)) {
+        try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_1_2_3, false)) {
 
             final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
                     .getLocalShards().get("cars").getActor();
             final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
                     .getLocalShards().get("cars").getActor();
-            member2Cars.tell(new StartDropMessages(AppendEntries.class), null);
-            member3Cars.tell(new StartDropMessages(AppendEntries.class), null);
+            member2Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
+            member3Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
 
             final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
             newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
@@ -1427,8 +1371,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                     "member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
                             -1), member3Cars);
 
-            member2Cars.tell(new StopDropMessages(AppendEntries.class), null);
-            member3Cars.tell(new StopDropMessages(AppendEntries.class), null);
+            member2Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
+            member3Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
 
             await("Is tx stuck in COMMIT_PENDING")
                     .atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
@@ -1446,16 +1390,16 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             followerDatastoreContextBuilder.snapshotOnRootOverwrite(true));
 
         leaderTestKit.waitForMembersUp("member-2");
-        final ContainerNode rootNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(SchemaContext.NAME))
+        final ContainerNode rootNode = Builders.containerBuilder()
+                .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
                 .withChild(CarsModel.create())
                 .build();
 
-        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
+        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
 
         // FIXME: CONTROLLER-2020: ClientBackedDatastore does not have stable indexes/term,
         //                         the snapshot index seems to fluctuate
-        assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
+        assumeTrue(false);
         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
             state -> assertEquals(1, state.getSnapshotIndex()));
 
@@ -1481,7 +1425,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
 
         // root overwrite so expect a snapshot
-        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
+        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
 
         // this was a real snapshot so everything should be in it(1(DisableTrackingPayload) + 1 + 10 + 1)
         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
@@ -1511,10 +1455,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
         MetadataShardDataTreeSnapshot shardSnapshot =
                 (MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
-        assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
+        assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().orElseThrow());
     }
 
-    private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
+    private static void sendDatastoreContextUpdate(final ClientBackedDataStore dataStore, final Builder builder) {
         final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
         final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
         final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
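
The assertions rewritten in this file repeatedly unwrap an ExecutionException and inspect its (root) cause via assertThrows, Throwables.getRootCause and Hamcrest's instanceOf. A minimal, self-contained sketch of that pattern, using only plain JUnit 4, Hamcrest and Guava, with a hypothetical CompletableFuture standing in for the commit future:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertThrows;

import com.google.common.base.Throwables;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import org.junit.Test;

public class CommitFailureAssertionSketchTest {
    @Test
    public void rootCauseIsChecked() {
        // Stand-in for a failing commit future; the real tests obtain theirs from doCommit()/commit().
        final CompletableFuture<Void> commitFuture = new CompletableFuture<>();
        commitFuture.completeExceptionally(new IllegalStateException("simulated commit failure"));

        // Unwrap the ExecutionException and assert on its root cause, as the updated tests do.
        final ExecutionException ex = assertThrows(ExecutionException.class, commitFuture::get);
        assertThat(Throwables.getRootCause(ex), instanceOf(IllegalStateException.class));
    }
}

The same shape is used above for RequestTimeoutException and TransactionCommitFailedException; only the expected cause type differs.
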
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java
deleted file mode 100644 (file)
index 56bbbf5..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import akka.util.Timeout;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.FiniteDuration;
-
-public class DistributedDataStoreTest extends AbstractActorTest {
-    private static final ClientIdentifier UNKNOWN_ID = ClientIdentifier.create(
-            FrontendIdentifier.create(MemberName.forName("local"), FrontendType.forName("unknown")), 0);
-
-    private static SchemaContext SCHEMA_CONTEXT;
-
-    @Mock
-    private ActorUtils actorUtils;
-
-    @Mock
-    private DatastoreContext datastoreContext;
-
-    @Mock
-    private Timeout shardElectionTimeout;
-
-    @BeforeClass
-    public static void beforeClass() {
-        SCHEMA_CONTEXT = TestModel.createTestContext();
-    }
-
-    @AfterClass
-    public static void afterClass() {
-        SCHEMA_CONTEXT = null;
-    }
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(SCHEMA_CONTEXT).when(actorUtils).getSchemaContext();
-        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
-    }
-
-    @Test
-    public void testRateLimitingUsedInReadWriteTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newReadWriteTransaction();
-
-            verify(actorUtils, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingUsedInWriteOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newWriteOnlyTransaction();
-
-            verify(actorUtils, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingNotUsedInReadOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newReadOnlyTransaction();
-            distributedDataStore.newReadOnlyTransaction();
-            distributedDataStore.newReadOnlyTransaction();
-
-            verify(actorUtils, times(0)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testWaitTillReadyBlocking() {
-        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
-        doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
-        doReturn(1).when(datastoreContext).getInitialSettleTimeoutMultiplier();
-        doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            long start = System.currentTimeMillis();
-
-            distributedDataStore.waitTillReady();
-
-            long end = System.currentTimeMillis();
-
-            assertTrue("Expected to be blocked for 50 millis", end - start >= 50);
-        }
-    }
-
-    @Test
-    public void testWaitTillReadyCountDown() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-            doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
-            doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
-            doReturn(FiniteDuration.apply(5000, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-
-            Executors.newSingleThreadExecutor().submit(() -> {
-                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-                distributedDataStore.readinessFuture().set(null);
-            });
-
-            long start = System.currentTimeMillis();
-
-            distributedDataStore.waitTillReady();
-
-            long end = System.currentTimeMillis();
-
-            assertTrue("Expected to be released in 500 millis", end - start < 5000);
-        }
-    }
-}
index b5b3f92f59a8a82003605b0417f44cc1faecd170..b5fcc951aa87527387623b841e201faf7847de0f 100644 (file)
@@ -55,7 +55,7 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { TestDistributedDataStore.class }, { TestClientBackedDataStore.class }
+                { TestClientBackedDataStore.class }
         });
     }
 
@@ -96,8 +96,8 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
         CollectionNodeBuilder<MapEntryNode, SystemMapNode> carMapBuilder = ImmutableNodes.mapNodeBuilder(CAR_QNAME);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-                testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+            "module-shards-cars-member-1.conf", true, "cars")) {
 
             DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -133,7 +133,7 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
 
             MapNode cars = carMapBuilder.build();
 
-            assertEquals("cars not matching result", cars, optional.get());
+            assertEquals("cars not matching result", cars, optional.orElseThrow());
 
             txChain.close();
 
@@ -155,16 +155,15 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
         }
 
         // test restoration from journal and verify data matches
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-                testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+            "module-shards-cars-member-1.conf", true, "cars")) {
 
             DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
             MapNode cars = carMapBuilder.build();
 
             final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
                     .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("restored cars do not match snapshot", cars, optional.get());
+            assertEquals("restored cars do not match snapshot", Optional.of(cars), optional);
 
             txChain.close();
         }
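
The hunk above collapses the earlier two-step isPresent()/get() check into a single assertEquals against Optional.of(...). A tiny, self-contained JUnit 4 sketch of that idiom, with a hypothetical String value standing in for the cars map node:

import static org.junit.Assert.assertEquals;

import java.util.Optional;
import org.junit.Test;

public class OptionalAssertionSketchTest {
    @Test
    public void presenceAndValueInOneAssertion() {
        final Optional<String> read = Optional.of("cars");

        // Fails with a descriptive message if the value is absent or different, covering what
        // the separate "isPresent" check plus value comparison used to assert in two steps.
        assertEquals("restored cars do not match snapshot", Optional.of("cars"), read);
    }
}
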
index 14a0c3af1444df104cd42fdd41e01e82d02144e8..4c56c472e418589b371c039747d0ed0f1d4bed5b 100644 (file)
@@ -7,15 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.junit.Assert.assertSame;
+import static org.mockito.Mockito.mock;
+
 import akka.actor.ActorRef;
-import java.util.Arrays;
-import java.util.Collection;
-import org.junit.Assert;
+import java.util.List;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
 
@@ -26,10 +26,10 @@ public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
         ForwardingDataTreeChangeListener forwardingListener = new ForwardingDataTreeChangeListener(
                 getSystem().actorSelection(actorRef.path()), ActorRef.noSender());
 
-        Collection<DataTreeCandidate> expected = Arrays.asList(Mockito.mock(DataTreeCandidate.class));
+        List<DataTreeCandidate> expected = List.of(mock(DataTreeCandidate.class));
         forwardingListener.onDataTreeChanged(expected);
 
         DataTreeChanged actual = MessageCollectorActor.expectFirstMatching(actorRef, DataTreeChanged.class, 5000);
-        Assert.assertSame(expected, actual.getChanges());
+        assertSame(expected, actual.getChanges());
     }
 }
index 8a5705ded73faa2f358689e10418a69b3012a64a..7786f13948f010a53d9686a4f969046fcc54d0ff 100644 (file)
@@ -35,7 +35,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public class FrontendReadWriteTransactionTest {
 
@@ -133,7 +133,7 @@ public class FrontendReadWriteTransactionTest {
         assertNotNull(handleRequest(readyReq));
         verify(mockParent).finishTransaction(same(shardTransaction), eq(Optional.empty()));
 
-        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.empty(), true));
+        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
     }
 
     @Test(expected = IllegalStateException.class)
@@ -160,6 +160,6 @@ public class FrontendReadWriteTransactionTest {
         assertNull(handleRequest(abortReq));
         verify(mockParent).abortTransaction(same(shardTransaction), any(Runnable.class));
 
-        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.empty(), true));
+        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
     }
 }
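
This file, like the integration tests above, retires YangInstanceIdentifier.empty() in favour of YangInstanceIdentifier.of(), which denotes the same root path. A short sketch, assuming the yangtools version targeted by this patch:

import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

public final class RootPathSketch {
    public static void main(final String[] args) {
        // The no-argument of() yields the empty (root) identifier formerly obtained via empty().
        final YangInstanceIdentifier root = YangInstanceIdentifier.of();
        System.out.println(root.isEmpty()); // prints: true
    }
}
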
index 72cfdceeeaff415dd86d189bef84dae0b33ed207..63e9ba72a74fb2606477d6a9caabe44ad7975abf 100644 (file)
@@ -25,7 +25,6 @@ import com.google.common.base.Stopwatch;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.lang.reflect.Constructor;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -34,7 +33,6 @@ import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
 import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@@ -77,76 +75,50 @@ public class IntegrationTestKit extends ShardTestKit {
         return datastoreContextBuilder;
     }
 
-    public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
-                                                          final boolean waitUntilLeader,
-                                                          final EffectiveModelContext schemaContext) throws Exception {
-        return setupDistributedDataStore(typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, schemaContext);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, "module-shards.conf", true, SchemaContextHelper.full(),
+            shardNames);
     }
 
-    public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
-                                                          final String modulesConfig,
-                                                          final boolean waitUntilLeader,
-                                                          final EffectiveModelContext schemaContext,
-                                                          final String... shardNames) throws Exception {
-        return (DistributedDataStore) setupAbstractDataStore(DistributedDataStore.class, typeName, moduleShardsConfig,
-                modulesConfig, waitUntilLeader, schemaContext, shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final boolean waitUntilLeader, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
+            SchemaContextHelper.full(), shardNames);
     }
 
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String... shardNames)
-            throws Exception {
-        return setupAbstractDataStore(implementation, typeName, "module-shards.conf", true,
-                SchemaContextHelper.full(), shardNames);
-    }
-
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final boolean waitUntilLeader,
-                                                    final String... shardNames) throws Exception {
-        return setupAbstractDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
-                SchemaContextHelper.full(), shardNames);
-    }
-
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String moduleShardsConfig,
-                                                    final boolean waitUntilLeader, final String... shardNames)
-            throws Exception {
-        return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
-                SchemaContextHelper.full(), shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+            final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
+            SchemaContextHelper.full(), shardNames);
     }
 
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String moduleShardsConfig,
-                                                    final boolean waitUntilLeader,
-                                                    final EffectiveModelContext schemaContext,
-                                                    final String... shardNames) throws Exception {
-        return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
-                schemaContext, shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+            final EffectiveModelContext schemaContext, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
+            schemaContext, shardNames);
     }
 
-    private AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                     final String typeName, final String moduleShardsConfig,
-                                                     final String modulesConfig, final boolean waitUntilLeader,
-                                                     final EffectiveModelContext schemaContext,
-                                                     final String... shardNames)
-            throws Exception {
+    private ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final String modulesConfig,
+            final boolean waitUntilLeader, final EffectiveModelContext schemaContext, final String... shardNames)
+                throws Exception {
         final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
         final Configuration config = new ConfigurationImpl(moduleShardsConfig, modulesConfig);
 
         setDataStoreName(typeName);
 
-        // Make sure we set up datastore context correctly
-        datastoreContextBuilder.useTellBasedProtocol(ClientBackedDataStore.class.isAssignableFrom(implementation));
-
         final DatastoreContext datastoreContext = datastoreContextBuilder.build();
         final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
         doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
         doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(anyString());
 
-        final Constructor<? extends AbstractDataStore> constructor = implementation.getDeclaredConstructor(
-                ActorSystem.class, ClusterWrapper.class, Configuration.class,
-                DatastoreContextFactory.class, DatastoreSnapshot.class);
+        final var constructor = implementation.getDeclaredConstructor(ActorSystem.class, ClusterWrapper.class,
+            Configuration.class, DatastoreContextFactory.class, DatastoreSnapshot.class);
 
-        final AbstractDataStore dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
+        final var dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
             restoreFromSnapshot);
 
         dataStore.onModelContextUpdated(schemaContext);
@@ -169,52 +141,6 @@ public class IntegrationTestKit extends ShardTestKit {
         }
     }
 
-    public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
-                                                                       final EffectiveModelContext schemaContext) {
-        final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
-        final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
-        setDataStoreName(typeName);
-
-        final DatastoreContext datastoreContext = getDatastoreContextBuilder().build();
-
-        final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
-        doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(anyString());
-
-        final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
-                configuration, mockContextFactory, restoreFromSnapshot);
-
-        dataStore.onModelContextUpdated(schemaContext);
-
-        datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
-        return dataStore;
-    }
-
-    public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
-                                                                       final EffectiveModelContext schemaContext,
-                                                                       final LogicalDatastoreType storeType) {
-        final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
-        final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
-        setDataStoreName(typeName);
-
-        final DatastoreContext datastoreContext =
-                getDatastoreContextBuilder().logicalStoreType(storeType).build();
-
-        final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
-        doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(anyString());
-
-        final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
-                configuration, mockContextFactory, restoreFromSnapshot);
-
-        dataStore.onModelContextUpdated(schemaContext);
-
-        datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
-        return dataStore;
-    }
-
     public void waitUntilLeader(final ActorUtils actorUtils, final String... shardNames) {
         for (String shardName: shardNames) {
             ActorRef shard = findLocalShard(actorUtils, shardName);
@@ -253,15 +179,14 @@ public class IntegrationTestKit extends ShardTestKit {
     }
 
     public static ActorRef findLocalShard(final ActorUtils actorUtils, final String shardName) {
-        ActorRef shard = null;
-        for (int i = 0; i < 20 * 5 && shard == null; i++) {
+        for (int i = 0; i < 20 * 5; i++) {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
             Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
             if (shardReply.isPresent()) {
-                shard = shardReply.get();
+                return shardReply.orElseThrow();
             }
         }
-        return shard;
+        return null;
     }
 
     public static void waitUntilShardIsDown(final ActorUtils actorUtils, final String shardName) {
@@ -277,7 +202,7 @@ public class IntegrationTestKit extends ShardTestKit {
         throw new IllegalStateException("Shard[" + shardName + " did not shutdown in time");
     }
 
-    public static void verifyShardStats(final AbstractDataStore datastore, final String shardName,
+    public static void verifyShardStats(final ClientBackedDataStore datastore, final String shardName,
             final ShardStatsVerifier verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -302,7 +227,7 @@ public class IntegrationTestKit extends ShardTestKit {
         throw lastError;
     }
 
-    public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
+    public static void verifyShardState(final ClientBackedDataStore datastore, final String shardName,
             final Consumer<OnDemandShardState> verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -327,7 +252,7 @@ public class IntegrationTestKit extends ShardTestKit {
         throw lastError;
     }
 
-    void testWriteTransaction(final AbstractDataStore dataStore, final YangInstanceIdentifier nodePath,
+    void testWriteTransaction(final ClientBackedDataStore dataStore, final YangInstanceIdentifier nodePath,
             final NormalizedNode nodeToWrite) throws Exception {
 
         // 1. Create a write-only Tx
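
A minimal usage sketch of the consolidated setupDataStore overloads above, assuming an already-constructed IntegrationTestKit named kit; the datastore type name and shard name are placeholders, and the import reflects the ClientBackedDataStore package referenced later in this change:

    import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;

    // Sketch only: per the overloads above, the three-argument form defaults to
    // module-shards.conf, waitUntilLeader=true and SchemaContextHelper.full().
    final ClientBackedDataStore dataStore =
        kit.setupDataStore(ClientBackedDataStore.class, "config-test", "cars-1");
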
index fc5665b925bc290fd2e8b2a93d0c75e98a8b510b..1e51e9cc9a2fd876125145ad137480481f327d55 100644 (file)
@@ -25,11 +25,11 @@ import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class JsonExportTest extends AbstractShardTest {
     private static final String DUMMY_DATA = "Dummy data as snapshot sequence number is set to 0 in "
@@ -47,7 +47,7 @@ public class JsonExportTest extends AbstractShardTest {
     @Before
     public void setUp() throws Exception {
         super.setUp();
-        final File exportTmpFolder = temporaryFolder.newFolder("persistence-export");
+        final var exportTmpFolder = temporaryFolder.newFolder("persistence-export");
         actualJournalFilePath = exportTmpFolder.getAbsolutePath() + "/journals/"
             + "member-1-shard-inventory-config" + nextShardNum + "-journal.json";
         actualSnapshotFilePath = exportTmpFolder.getAbsolutePath() + "/snapshots/"
@@ -66,10 +66,12 @@ public class JsonExportTest extends AbstractShardTest {
     @Test
     public void testJsonExport() throws Exception {
         // Set up the InMemorySnapshotStore.
-        final DataTree source = setupInMemorySnapshotStore();
+        final var source = setupInMemorySnapshotStore();
 
-        final DataTreeModification writeMod = source.takeSnapshot().newModification();
-        writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
+        final var writeMod = source.takeSnapshot().newModification();
+        writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build());
         writeMod.ready();
         InMemoryJournal.addEntry(shardID.toString(), 0, DUMMY_DATA);
 
@@ -82,13 +84,18 @@ public class JsonExportTest extends AbstractShardTest {
 
         // Add some ModificationPayload entries
         for (int i = 1; i <= nListEntries; i++) {
-            listEntryKeys.add(i);
-
-            final YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
-                    .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
-
-            final DataTreeModification mod = source.takeSnapshot().newModification();
-            mod.merge(path, ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i));
+            final Integer value = i;
+            listEntryKeys.add(value);
+
+            final var path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+                    .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value).build();
+
+            final var mod = source.takeSnapshot().newModification();
+            mod.merge(path, ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(
+                    NodeIdentifierWithPredicates.of(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value))
+                .withChild(ImmutableNodes.leafNode(TestModel.ID_QNAME, value))
+                .build());
             mod.ready();
 
             InMemoryJournal.addEntry(shardID.toString(), i + 1, new SimpleReplicatedLogEntry(i, 1,
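
A self-contained sketch of the map-entry construction used above, assuming the ImmutableNodes and NodeIdentifierWithPredicates signatures shown in this hunk; the QName parameters stand in for TestModel's constants:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
    import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    // Builds one keyed list entry, mirroring the newMapEntryBuilder() call above:
    // the key predicate and the key leaf carry the same value.
    static MapEntryNode listEntry(final QName listQName, final QName keyQName, final int id) {
        return ImmutableNodes.newMapEntryBuilder()
            .withNodeIdentifier(NodeIdentifierWithPredicates.of(listQName, keyQName, id))
            .withChild(ImmutableNodes.leafNode(keyQName, id))
            .build();
    }
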
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java
deleted file mode 100644 (file)
index 48e4017..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-@RunWith(MockitoJUnitRunner.StrictStubs.class)
-public class LocalTransactionContextTest {
-    @Mock
-    private DOMStoreReadWriteTransaction readWriteTransaction;
-    @Mock
-    private LocalTransactionReadySupport mockReadySupport;
-
-    private LocalTransactionContext localTransactionContext;
-
-    @Before
-    public void setUp() {
-        final TransactionIdentifier txId = new TransactionIdentifier(new LocalHistoryIdentifier(ClientIdentifier.create(
-            FrontendIdentifier.create(MemberName.forName("member"), FrontendType.forName("type")), 0), 0), 0);
-
-        localTransactionContext = new LocalTransactionContext(readWriteTransaction, txId, mockReadySupport) {
-            @Override
-            DOMStoreWriteTransaction getWriteDelegate() {
-                return readWriteTransaction;
-            }
-
-            @Override
-            DOMStoreReadTransaction getReadDelegate() {
-                return readWriteTransaction;
-            }
-        };
-    }
-
-    @Test
-    public void testWrite() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        NormalizedNode normalizedNode = mock(NormalizedNode.class);
-        localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
-        verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-    }
-
-    @Test
-    public void testMerge() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        NormalizedNode normalizedNode = mock(NormalizedNode.class);
-        localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
-        verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-    }
-
-    @Test
-    public void testDelete() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        localTransactionContext.executeDelete(yangInstanceIdentifier, null);
-        verify(readWriteTransaction).delete(yangInstanceIdentifier);
-    }
-
-    @Test
-    public void testRead() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        NormalizedNode normalizedNode = mock(NormalizedNode.class);
-        doReturn(FluentFutures.immediateFluentFuture(Optional.of(normalizedNode))).when(readWriteTransaction)
-            .read(yangInstanceIdentifier);
-        localTransactionContext.executeRead(new ReadData(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
-                SettableFuture.create(), null);
-        verify(readWriteTransaction).read(yangInstanceIdentifier);
-    }
-
-    @Test
-    public void testExists() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        doReturn(FluentFutures.immediateTrueFluentFuture()).when(readWriteTransaction).exists(yangInstanceIdentifier);
-        localTransactionContext.executeRead(new DataExists(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
-                SettableFuture.create(), null);
-        verify(readWriteTransaction).exists(yangInstanceIdentifier);
-    }
-
-    @Test
-    public void testReady() {
-        final LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
-        doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(Optional.empty());
-        doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, null);
-
-        Future<ActorSelection> future = localTransactionContext.readyTransaction(null, Optional.empty());
-        assertTrue(future.isCompleted());
-
-        verify(mockReadySupport).onTransactionReady(readWriteTransaction, null);
-    }
-
-    @Test
-    public void testReadyWithWriteError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        NormalizedNode normalizedNode = mock(NormalizedNode.class);
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
-        localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
-        localTransactionContext.executeWrite(yangInstanceIdentifier, normalizedNode, null);
-
-        verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
-        doReadyWithExpectedError(error);
-    }
-
-    @Test
-    public void testReadyWithMergeError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        NormalizedNode normalizedNode = mock(NormalizedNode.class);
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
-        localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
-        localTransactionContext.executeMerge(yangInstanceIdentifier, normalizedNode, null);
-
-        verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
-        doReadyWithExpectedError(error);
-    }
-
-    @Test
-    public void testReadyWithDeleteError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.empty();
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).delete(yangInstanceIdentifier);
-
-        localTransactionContext.executeDelete(yangInstanceIdentifier, null);
-        localTransactionContext.executeDelete(yangInstanceIdentifier, null);
-
-        verify(readWriteTransaction).delete(yangInstanceIdentifier);
-
-        doReadyWithExpectedError(error);
-    }
-
-    private void doReadyWithExpectedError(final RuntimeException expError) {
-        LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
-        doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(Optional.empty());
-        doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, expError);
-
-        localTransactionContext.readyTransaction(null, Optional.empty());
-    }
-}
index a7c702c6dcb4f39da89dc7e943ae20b93dc4b7b8..4466493857fa9d1e85f5470b45f56260fe756270 100644 (file)
@@ -28,6 +28,7 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
@@ -51,8 +52,8 @@ public class MemberNode {
     private static final String MEMBER_1_ADDRESS = "akka://cluster-test@127.0.0.1:2558";
 
     private IntegrationTestKit kit;
-    private AbstractDataStore configDataStore;
-    private AbstractDataStore operDataStore;
+    private ClientBackedDataStore configDataStore;
+    private ClientBackedDataStore operDataStore;
     private DatastoreContext.Builder datastoreContextBuilder;
     private boolean cleanedUp;
 
@@ -72,12 +73,12 @@ public class MemberNode {
     }
 
 
-    public AbstractDataStore configDataStore() {
+    public ClientBackedDataStore configDataStore() {
         return configDataStore;
     }
 
 
-    public AbstractDataStore operDataStore() {
+    public ClientBackedDataStore operDataStore() {
         return operDataStore;
     }
 
@@ -124,14 +125,14 @@ public class MemberNode {
             }
 
             try {
-                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), true);
             } catch (RuntimeException e) {
                 LoggerFactory.getLogger(MemberNode.class).warn("Failed to shutdown actor system", e);
             }
         }
     }
 
-    public static void verifyRaftState(final AbstractDataStore datastore, final String shardName,
+    public static void verifyRaftState(final ClientBackedDataStore datastore, final String shardName,
             final RaftStateVerifier verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -156,7 +157,7 @@ public class MemberNode {
         throw lastError;
     }
 
-    public static void verifyRaftPeersPresent(final AbstractDataStore datastore, final String shardName,
+    public static void verifyRaftPeersPresent(final ClientBackedDataStore datastore, final String shardName,
             final String... peerMemberNames) throws Exception {
         final Set<String> peerIds = new HashSet<>();
         for (String p: peerMemberNames) {
@@ -168,7 +169,7 @@ public class MemberNode {
             raftState.getPeerAddresses().keySet()));
     }
 
-    public static void verifyNoShardPresent(final AbstractDataStore datastore, final String shardName) {
+    public static void verifyNoShardPresent(final ClientBackedDataStore datastore, final String shardName) {
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             Optional<ActorRef> shardReply = datastore.getActorUtils().findLocalShard(shardName);
@@ -204,7 +205,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder moduleShardsConfig(final String newModuleShardsConfig) {
-            this.moduleShardsConfig = newModuleShardsConfig;
+            moduleShardsConfig = newModuleShardsConfig;
             return this;
         }
 
@@ -214,7 +215,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder akkaConfig(final String newAkkaConfig) {
-            this.akkaConfig = newAkkaConfig;
+            akkaConfig = newAkkaConfig;
             return this;
         }
 
@@ -224,7 +225,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder useAkkaArtery(final boolean newUseAkkaArtery) {
-            this.useAkkaArtery = newUseAkkaArtery;
+            useAkkaArtery = newUseAkkaArtery;
             return this;
         }
 
@@ -234,7 +235,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder testName(final String newTestName) {
-            this.testName = newTestName;
+            testName = newTestName;
             return this;
         }
 
@@ -244,7 +245,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder waitForShardLeader(final String... shardNames) {
-            this.waitForshardLeader = shardNames;
+            waitForshardLeader = shardNames;
             return this;
         }
 
@@ -254,7 +255,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder createOperDatastore(final boolean value) {
-            this.createOperDatastore = value;
+            createOperDatastore = value;
             return this;
         }
 
@@ -264,7 +265,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder schemaContext(final EffectiveModelContext newSchemaContext) {
-            this.schemaContext = newSchemaContext;
+            schemaContext = newSchemaContext;
             return this;
         }
 
@@ -307,12 +308,12 @@ public class MemberNode {
 
             String memberName = new ClusterWrapperImpl(system).getCurrentMemberName().getName();
             node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-config-" + memberName);
-            node.configDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
-                    "config_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
+            node.configDataStore = node.kit.setupDataStore(ClientBackedDataStore.class, "config_" + testName,
+                moduleShardsConfig, true, schemaContext, waitForshardLeader);
 
             if (createOperDatastore) {
                 node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-oper-" + memberName);
-                node.operDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
+                node.operDataStore = node.kit.setupDataStore(ClientBackedDataStore.class,
                         "oper_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
             }
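
A brief sketch of how the retyped accessors are consumed, assuming an already-built MemberNode named node inside a test method that declares throws Exception; the shard and member names are placeholders:

    // configDataStore()/operDataStore() now return ClientBackedDataStore, so the
    // static verifiers above accept them directly.
    MemberNode.verifyRaftPeersPresent(node.configDataStore(), "cars", "member-2", "member-3");
    IntegrationTestKit.verifyShardState(node.configDataStore(), "cars",
        state -> { /* inspect OnDemandShardState here */ });
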
 
@@ -5,38 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import java.util.concurrent.atomic.AtomicReference;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 interface OperationCallback {
-    OperationCallback NO_OP_CALLBACK = new OperationCallback() {
-        @Override
-        public void run() {
-        }
-
-        @Override
-        public void success() {
-        }
-
-        @Override
-        public void failure() {
-        }
-
-        @Override
-        public void pause() {
-        }
-
-        @Override
-        public void resume() {
-        }
-    };
-
     class Reference extends AtomicReference<OperationCallback> {
         private static final long serialVersionUID = 1L;
 
-        Reference(OperationCallback initialValue) {
+        Reference(final OperationCallback initialValue) {
             super(initialValue);
         }
     }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java
deleted file mode 100644 (file)
index 6eedc7d..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import akka.actor.ActorRef;
-import akka.actor.Status.Failure;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.OnComplete;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Test whether RemoteTransactionContext operates correctly.
- */
-public class RemoteTransactionContextTest extends AbstractActorTest {
-    private static final TransactionIdentifier TX_ID = new TransactionIdentifier(new LocalHistoryIdentifier(
-        ClientIdentifier.create(FrontendIdentifier.create(MemberName.forName("test"), FrontendType.forName("test")), 0),
-        0), 0);
-
-    private OperationLimiter limiter;
-    private RemoteTransactionContext txContext;
-    private ActorUtils actorUtils;
-    private TestKit kit;
-
-    @Before
-    public void before() {
-        kit = new TestKit(getSystem());
-        actorUtils = Mockito.spy(new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
-            mock(Configuration.class)));
-        limiter = new OperationLimiter(TX_ID, 4, 0);
-        txContext = new RemoteTransactionContext(TX_ID, actorUtils.actorSelection(kit.getRef().path()), actorUtils,
-            DataStoreVersions.CURRENT_VERSION, limiter);
-        txContext.operationHandOffComplete();
-    }
-
-    /**
-     * OperationLimiter should be correctly released when a failure, like AskTimeoutException occurs. Future reads
-     * need to complete immediately with the failure and modifications should not be throttled and thrown away
-     * immediately.
-     */
-    @Test
-    public void testLimiterOnFailure() throws TimeoutException, InterruptedException {
-        txContext.executeDelete(null, null);
-        txContext.executeDelete(null, null);
-        assertEquals(2, limiter.availablePermits());
-
-        final Future<Object> sendFuture = txContext.sendBatchedModifications();
-        assertEquals(2, limiter.availablePermits());
-
-        BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
-        assertEquals(2, msg.getModifications().size());
-        assertEquals(1, msg.getTotalMessagesSent());
-        sendReply(new Failure(new NullPointerException()));
-        assertFuture(sendFuture, new OnComplete<>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof NullPointerException);
-                assertEquals(4, limiter.availablePermits());
-
-                // The transaction has failed, no throttling should occur
-                txContext.executeDelete(null, null);
-                assertEquals(4, limiter.availablePermits());
-
-                // Executing a read should result in immediate failure
-                final SettableFuture<Boolean> readFuture = SettableFuture.create();
-                txContext.executeRead(new DataExists(), readFuture, null);
-                assertTrue(readFuture.isDone());
-                try {
-                    readFuture.get();
-                    fail("Read future did not fail");
-                } catch (ExecutionException | InterruptedException e) {
-                    assertTrue(e.getCause() instanceof NullPointerException);
-                }
-            }
-        });
-
-        final Future<Object> commitFuture = txContext.directCommit(null);
-
-        msg = kit.expectMsgClass(BatchedModifications.class);
-        // Modification should have been thrown away by the dropped transmit induced by executeRead()
-        assertEquals(0, msg.getModifications().size());
-        assertTrue(msg.isDoCommitOnReady());
-        assertTrue(msg.isReady());
-        assertEquals(2, msg.getTotalMessagesSent());
-        sendReply(new Failure(new IllegalStateException()));
-        assertFuture(commitFuture, new OnComplete<>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof IllegalStateException);
-            }
-        });
-
-        kit.expectNoMessage();
-    }
-
-    /**
-     * OperationLimiter gives up throttling at some point -- {@link RemoteTransactionContext} needs to deal with that
-     * case, too.
-     */
-    @Test
-    public void testLimiterOnOverflowFailure() throws TimeoutException, InterruptedException {
-        txContext.executeDelete(null, null);
-        txContext.executeDelete(null, null);
-        txContext.executeDelete(null, null);
-        txContext.executeDelete(null, null);
-        assertEquals(0, limiter.availablePermits());
-        txContext.executeDelete(null, null);
-        // Last acquire should have failed ...
-        assertEquals(0, limiter.availablePermits());
-
-        final Future<Object> future = txContext.sendBatchedModifications();
-        assertEquals(0, limiter.availablePermits());
-
-        BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
-        // ... so we are sending 5 modifications ...
-        assertEquals(5, msg.getModifications().size());
-        assertEquals(1, msg.getTotalMessagesSent());
-        sendReply(new Failure(new NullPointerException()));
-
-        assertFuture(future, new OnComplete<>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof NullPointerException);
-                // ... but they account for only 4 permits.
-                assertEquals(4, limiter.availablePermits());
-            }
-        });
-
-        kit.expectNoMessage();
-    }
-
-    private void sendReply(final Object message) {
-        final ActorRef askActor = kit.getLastSender();
-        kit.watch(askActor);
-        kit.reply(new Failure(new IllegalStateException()));
-        kit.expectTerminated(askActor);
-    }
-
-    private static void assertFuture(final Future<Object> future, final OnComplete<Object> complete)
-            throws TimeoutException, InterruptedException {
-        Await.ready(future, FiniteDuration.apply(3, TimeUnit.SECONDS));
-        future.onComplete(complete, ExecutionContexts.fromExecutor(MoreExecutors.directExecutor()));
-    }
-}
index 789e922939a62766c74446f5a3ac2b9e95944b0d..1b5cc255f6a0bc80d7228cbfaf507ddcf0480b66 100644 (file)
@@ -33,8 +33,8 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 
 public class RootDataTreeChangeListenerProxyTest extends AbstractActorTest {
 
@@ -47,7 +47,7 @@ public class RootDataTreeChangeListenerProxyTest extends AbstractActorTest {
         ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
             ClusteredDOMDataTreeChangeListener.class);
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.empty();
+        final YangInstanceIdentifier path = YangInstanceIdentifier.of();
         final RootDataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> rootListenerProxy =
             new RootDataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener,
             Set.of("shard-1", "shard-2"));
@@ -79,7 +79,7 @@ public class RootDataTreeChangeListenerProxyTest extends AbstractActorTest {
         final TestKit kit2 = new TestKit(getSystem());
         final ActorSelection rootListenerActor = getSystem().actorSelection(registerForShard1.getListenerActorPath());
         rootListenerActor.tell(new EnableNotification(true, "test"), kit.getRef());
-        final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(),
+        final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(),
             PeopleModel.create());
         rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit.getRef());
         rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit2.getRef());
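
A compact sketch of the root-candidate construction used above, assuming PeopleModel.create() returns the same test container as in the hunk:

    import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
    import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;

    // The root path comes from YangInstanceIdentifier.of() (formerly empty()),
    // and the candidate types now live under yang.data.tree.*.
    final YangInstanceIdentifier root = YangInstanceIdentifier.of();
    final DataTreeCandidate candidate =
        DataTreeCandidates.fromNormalizedNode(root, PeopleModel.create());
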
index 8ef0f36c3ac319533a42d45d0baf745fb18cc0c5..0b5295584eca4b0100630d641e5a15edbb8a1cc1 100644 (file)
@@ -49,6 +49,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardCommitCoordinationTest extends AbstractShardTest {
     private static final Logger LOG = LoggerFactory.getLogger(ShardCommitCoordinationTest.class);
 
@@ -553,7 +554,7 @@ public class ShardCommitCoordinationTest extends AbstractShardTest {
         LOG.info("{} ending", testName);
     }
 
-    static void verifyInnerListEntry(TestActorRef<Shard> shard, int outerID, String innerID) {
+    static void verifyInnerListEntry(final TestActorRef<Shard> shard, final int outerID, final String innerID) {
         final YangInstanceIdentifier path = innerEntryPath(outerID, innerID);
         final NormalizedNode innerListEntry = readStore(shard, path);
         assertNotNull(path + " not found", innerListEntry);
index 7de4835e374691b7131fe75b5d9be13492e06965..234c69e011bd05a978d13b6927b418a191fb8cf5 100644 (file)
@@ -23,7 +23,8 @@ import org.mockito.InOrder;
 import org.mockito.invocation.InvocationOnMock;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public final class ShardDataTreeMocking {
 
@@ -37,18 +38,18 @@ public final class ShardDataTreeMocking {
     }
 
     public static ShardDataTreeCohort immediateCanCommit(final ShardDataTreeCohort cohort) {
-        final FutureCallback<Void> callback = mockCallback();
-        doNothing().when(callback).onSuccess(null);
+        final FutureCallback<Empty> callback = mockCallback();
+        doNothing().when(callback).onSuccess(Empty.value());
         cohort.canCommit(callback);
 
-        verify(callback).onSuccess(null);
+        verify(callback).onSuccess(Empty.value());
         verifyNoMoreInteractions(callback);
         return cohort;
     }
 
-    public static FutureCallback<Void> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
-        final FutureCallback<Void> callback = mockCallback();
-        doNothing().when(callback).onSuccess(null);
+    public static FutureCallback<Empty> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
+        final FutureCallback<Empty> callback = mockCallback();
+        doNothing().when(callback).onSuccess(Empty.value());
         doNothing().when(callback).onFailure(any(Throwable.class));
         cohort.canCommit(callback);
         return callback;
@@ -102,11 +103,11 @@ public final class ShardDataTreeMocking {
         }).when(preCommitCallback).onSuccess(any(DataTreeCandidate.class));
         doNothing().when(preCommitCallback).onFailure(any(Throwable.class));
 
-        final FutureCallback<Void> canCommit = mockCallback();
+        final FutureCallback<Empty> canCommit = mockCallback();
         doAnswer(invocation -> {
             cohort.preCommit(preCommitCallback);
             return null;
-        }).when(canCommit).onSuccess(null);
+        }).when(canCommit).onSuccess(Empty.value());
         doNothing().when(canCommit).onFailure(any(Throwable.class));
 
         cohort.canCommit(canCommit);
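
A minimal sketch of the Void-to-Empty callback mocking pattern shown above, using only the Mockito calls and the Empty.value() accessor visible in this hunk:

    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.mock;

    import com.google.common.util.concurrent.FutureCallback;
    import org.opendaylight.yangtools.yang.common.Empty;

    // Success is now signalled with Empty.value() instead of null.
    @SuppressWarnings("unchecked")
    static FutureCallback<Empty> expectSuccess() {
        final FutureCallback<Empty> callback = mock(FutureCallback.class);
        doNothing().when(callback).onSuccess(Empty.value());
        return callback;
    }
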
index cfb0379966b4842e1640aa324e5a9cc17b851fcb..ef6b2448f72bc0aac5f3e157a56bc36204713abf 100644 (file)
@@ -36,7 +36,6 @@ import com.google.common.base.Ticker;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.io.IOException;
-import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -57,6 +56,7 @@ import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
@@ -64,18 +64,18 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
 import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
@@ -194,14 +194,14 @@ public class ShardDataTreeTest extends AbstractTest {
         addCar(shardDataTree, "optima");
 
         verifyOnDataTreeChanged(listener, dtc -> {
-            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
             assertEquals("getRootPath", CarsModel.newCarPath("optima"), dtc.getRootPath());
         });
 
         addCar(shardDataTree, "sportage");
 
         verifyOnDataTreeChanged(listener, dtc -> {
-            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
             assertEquals("getRootPath", CarsModel.newCarPath("sportage"), dtc.getRootPath());
         });
 
@@ -219,7 +219,7 @@ public class ShardDataTreeTest extends AbstractTest {
         verifyOnDataTreeChanged(listener, dtc -> {
             ModificationType expType = expChanges.remove(dtc.getRootPath());
             assertNotNull("Got unexpected change for " + dtc.getRootPath(), expType);
-            assertEquals("getModificationType", expType, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", expType, dtc.getRootNode().modificationType());
         });
 
         if (!expChanges.isEmpty()) {
@@ -244,64 +244,64 @@ public class ShardDataTreeTest extends AbstractTest {
         final ShardDataTreeCohort cohort4 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
 
         immediateCanCommit(cohort1);
-        final FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
-        final FutureCallback<Void> canCommitCallback3 = coordinatedCanCommit(cohort3);
-        final FutureCallback<Void> canCommitCallback4 = coordinatedCanCommit(cohort4);
+        final FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
+        final FutureCallback<Empty> canCommitCallback3 = coordinatedCanCommit(cohort3);
+        final FutureCallback<Empty> canCommitCallback4 = coordinatedCanCommit(cohort4);
 
         final FutureCallback<DataTreeCandidate> preCommitCallback1 = coordinatedPreCommit(cohort1);
         verify(preCommitCallback1).onSuccess(cohort1.getCandidate());
-        verify(canCommitCallback2).onSuccess(null);
+        verify(canCommitCallback2).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
         verify(preCommitCallback2).onSuccess(cohort2.getCandidate());
-        verify(canCommitCallback3).onSuccess(null);
+        verify(canCommitCallback3).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback3 = coordinatedPreCommit(cohort3);
         verify(preCommitCallback3).onSuccess(cohort3.getCandidate());
-        verify(canCommitCallback4).onSuccess(null);
+        verify(canCommitCallback4).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback4 = coordinatedPreCommit(cohort4);
         verify(preCommitCallback4).onSuccess(cohort4.getCandidate());
 
         final FutureCallback<UnsignedLong> commitCallback2 = coordinatedCommit(cohort2);
-        verify(mockShard, never()).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        verify(mockShard, never()).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 anyBoolean());
         verifyNoMoreInteractions(commitCallback2);
 
         final FutureCallback<UnsignedLong> commitCallback4 = coordinatedCommit(cohort4);
-        verify(mockShard, never()).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        verify(mockShard, never()).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 anyBoolean());
         verifyNoMoreInteractions(commitCallback4);
 
         final FutureCallback<UnsignedLong> commitCallback1 = coordinatedCommit(cohort1);
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
         verifyNoMoreInteractions(commitCallback1);
         verifyNoMoreInteractions(commitCallback2);
 
         final FutureCallback<UnsignedLong> commitCallback3 = coordinatedCommit(cohort3);
         inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
         verifyNoMoreInteractions(commitCallback3);
         verifyNoMoreInteractions(commitCallback4);
 
         final ShardDataTreeCohort cohort5 = newShardDataTreeCohort(snapshot ->
             snapshot.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer()));
-        final FutureCallback<Void> canCommitCallback5 = coordinatedCanCommit(cohort5);
+        final FutureCallback<Empty> canCommitCallback5 = coordinatedCanCommit(cohort5);
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
 
         inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3, commitCallback4);
         inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
@@ -309,17 +309,12 @@ public class ShardDataTreeTest extends AbstractTest {
         inOrder.verify(commitCallback3).onSuccess(any(UnsignedLong.class));
         inOrder.verify(commitCallback4).onSuccess(any(UnsignedLong.class));
 
-        verify(canCommitCallback5).onSuccess(null);
+        verify(canCommitCallback5).onSuccess(Empty.value());
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
-
-        optional = snapshot.readNode(PeopleModel.BASE_PATH);
-        assertTrue("People node present", optional.isPresent());
-        assertEquals("People node", peopleNode, optional.get());
+        assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
+        assertEquals("People node", Optional.of(peopleNode), snapshot.readNode(PeopleModel.BASE_PATH));
     }
 
     @Test
@@ -339,19 +334,19 @@ public class ShardDataTreeTest extends AbstractTest {
         final FutureCallback<UnsignedLong> commitCallback1 = immediate3PhaseCommit(cohort1);
 
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
 
         inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3);
         inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
@@ -360,9 +355,7 @@ public class ShardDataTreeTest extends AbstractTest {
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
+        assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
     }
 
     @Test
@@ -418,10 +411,10 @@ public class ShardDataTreeTest extends AbstractTest {
         coordinatedPreCommit(cohort2);
         coordinatedPreCommit(cohort3);
 
-        FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
-        doNothing().when(mockAbortCallback).onSuccess(null);
+        FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+        doNothing().when(mockAbortCallback).onSuccess(Empty.value());
         cohort2.abort(mockAbortCallback);
-        verify(mockAbortCallback).onSuccess(null);
+        verify(mockAbortCallback).onSuccess(Empty.value());
 
         coordinatedPreCommit(cohort4);
         coordinatedCommit(cohort1);
@@ -429,25 +422,24 @@ public class ShardDataTreeTest extends AbstractTest {
         coordinatedCommit(cohort4);
 
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
-        inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
         Optional<NormalizedNode> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
+        assertEquals("Car node", Optional.of(carNode), optional);
     }
 
     @SuppressWarnings("unchecked")
@@ -466,15 +458,15 @@ public class ShardDataTreeTest extends AbstractTest {
             snapshot.write(PeopleModel.BASE_PATH, peopleNode));
 
         immediateCanCommit(cohort1);
-        FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
+        FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
 
         coordinatedPreCommit(cohort1);
-        verify(canCommitCallback2).onSuccess(null);
+        verify(canCommitCallback2).onSuccess(Empty.value());
 
-        FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
-        doNothing().when(mockAbortCallback).onSuccess(null);
+        FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+        doNothing().when(mockAbortCallback).onSuccess(Empty.value());
         cohort1.abort(mockAbortCallback);
-        verify(mockAbortCallback).onSuccess(null);
+        verify(mockAbortCallback).onSuccess(Empty.value());
 
         FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
         verify(preCommitCallback2).onFailure(any(Throwable.class));
@@ -486,15 +478,14 @@ public class ShardDataTreeTest extends AbstractTest {
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
         Optional<NormalizedNode> optional = snapshot.readNode(PeopleModel.BASE_PATH);
-        assertTrue("People node present", optional.isPresent());
-        assertEquals("People node", peopleNode, optional.get());
+        assertEquals("People node", Optional.of(peopleNode), optional);
     }
 
     @Test
     public void testUintCommitPayload() throws IOException {
         shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(),
-            DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), bigIntegerRoot()),
-            PayloadVersion.SODIUM_SR1));
+            DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), bigIntegerRoot()),
+            PayloadVersion.POTASSIUM));
 
         assertCarsUint64();
     }
@@ -520,7 +511,7 @@ public class ShardDataTreeTest extends AbstractTest {
                 .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
                 .withChild(Builders.mapBuilder()
                     .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
-                    .withChild(createCar("one", BigInteger.ONE))
+                    .withChild(createCar("one", Uint64.ONE))
                     .build())
                 .build());
         mod.ready();
@@ -529,7 +520,7 @@ public class ShardDataTreeTest extends AbstractTest {
         dataTree.commit(first);
 
         mod = dataTree.takeSnapshot().newModification();
-        mod.write(CarsModel.newCarPath("two"), createCar("two", BigInteger.TWO));
+        mod.write(CarsModel.newCarPath("two"), createCar("two", Uint64.TWO));
         mod.ready();
         dataTree.validate(mod);
         final DataTreeCandidate second = dataTree.prepare(mod);
@@ -538,7 +529,7 @@ public class ShardDataTreeTest extends AbstractTest {
         mod = dataTree.takeSnapshot().newModification();
         mod.merge(CarsModel.CAR_LIST_PATH, Builders.mapBuilder()
             .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
-            .withChild(createCar("three", BigInteger.TEN))
+            .withChild(createCar("three", Uint64.TEN))
             .build());
         mod.ready();
         dataTree.validate(mod);
@@ -546,17 +537,16 @@ public class ShardDataTreeTest extends AbstractTest {
         dataTree.commit(third);
 
         // Apply first candidate as a snapshot
-        shardDataTree.applyRecoverySnapshot(
-            new ShardSnapshotState(new MetadataShardDataTreeSnapshot(first.getRootNode().getDataAfter().get()), true));
+        shardDataTree.applyRecoverySnapshot(new ShardSnapshotState(
+            new MetadataShardDataTreeSnapshot(first.getRootNode().getDataAfter()), true));
         // Apply the other two snapshots as transactions
         shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), second,
-            PayloadVersion.SODIUM_SR1));
+            PayloadVersion.POTASSIUM));
         shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), third,
-            PayloadVersion.SODIUM_SR1));
+            PayloadVersion.POTASSIUM));
 
         // Verify uint translation
         final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        final NormalizedNode cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).get();
 
         assertEquals(Builders.mapBuilder()
             .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
@@ -564,12 +554,12 @@ public class ShardDataTreeTest extends AbstractTest {
             .withChild(createCar("one", Uint64.ONE))
             .withChild(createCar("two", Uint64.TWO))
             .withChild(createCar("three", Uint64.TEN))
-            .build(), cars);
+            .build(), snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow());
     }
 
     private void assertCarsUint64() {
         final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        final NormalizedNode cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).get();
+        final NormalizedNode cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow();
 
         assertEquals(Builders.mapBuilder()
             .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
@@ -580,25 +570,24 @@ public class ShardDataTreeTest extends AbstractTest {
 
     private static ContainerNode bigIntegerRoot() {
         return Builders.containerBuilder()
-                .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME))
-                .withChild(Builders.containerBuilder()
-                    .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
-                    .withChild(Builders.mapBuilder()
-                        .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
-                        // Note: BigInteger
-                        .withChild(createCar("foo", BigInteger.ONE))
-                        .build())
+            .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME))
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+                .withChild(Builders.mapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+                    .withChild(createCar("foo", Uint64.ONE))
                     .build())
-                .build();
+                .build())
+            .build();
     }
 
     private static MapEntryNode createCar(final String name, final Object value) {
         return Builders.mapEntryBuilder()
-                .withNodeIdentifier(NodeIdentifierWithPredicates.of(CarsModel.CAR_QNAME,CarsModel.CAR_NAME_QNAME, name))
-                .withChild(ImmutableNodes.leafNode(CarsModel.CAR_NAME_QNAME, name))
-                // Note: old BigInteger
-                .withChild(ImmutableNodes.leafNode(CarsModel.CAR_PRICE_QNAME, value))
-                .build();
+            .withNodeIdentifier(NodeIdentifierWithPredicates.of(CarsModel.CAR_QNAME, CarsModel.CAR_NAME_QNAME, name))
+            .withChild(ImmutableNodes.leafNode(CarsModel.CAR_NAME_QNAME, name))
+            // Note: old BigInteger
+            .withChild(ImmutableNodes.leafNode(CarsModel.CAR_PRICE_QNAME, value))
+            .build();
     }
 
     private ShardDataTreeCohort newShardDataTreeCohort(final DataTreeOperation operation) {
@@ -612,7 +601,7 @@ public class ShardDataTreeTest extends AbstractTest {
     @SuppressWarnings({ "rawtypes", "unchecked" })
     private static void verifyOnDataTreeChanged(final DOMDataTreeChangeListener listener,
             final Consumer<DataTreeCandidate> callback) {
-        ArgumentCaptor<Collection> changes = ArgumentCaptor.forClass(Collection.class);
+        ArgumentCaptor<List> changes = ArgumentCaptor.forClass(List.class);
         verify(listener, atLeastOnce()).onDataTreeChanged(changes.capture());
         for (Collection list : changes.getAllValues()) {
             for (Object dtc : list) {
@@ -632,7 +621,7 @@ public class ShardDataTreeTest extends AbstractTest {
 
         assertTrue(optional.isPresent());
 
-        return optional.get();
+        return optional.orElseThrow();
     }
 
     private static DataTreeCandidate addCar(final ShardDataTree shardDataTree) {
index 31026fd79ff273626a6bf6dc0a6af6af49eef0e1..996aa86636415c9843982709828f25ef222ceb00 100644
@@ -24,15 +24,15 @@ import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -131,6 +131,6 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
         dataTree.commit(dataTree.prepare(modification));
 
         return new ShardSnapshotState(new MetadataShardDataTreeSnapshot(dataTree.takeSnapshot().readNode(
-                YangInstanceIdentifier.empty()).get()));
+                YangInstanceIdentifier.of()).orElseThrow()));
     }
 }
index 3c26d27be0041683d926afbb7f8dd8cef057902b..e0db8543f6b6a96beb697f1968f4bf9cd44acfd7 100644
@@ -13,6 +13,7 @@ import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -93,30 +94,31 @@ import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -293,14 +295,16 @@ public class ShardTest extends AbstractShardTest {
         final DataTree store = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
             SCHEMA_CONTEXT);
 
-        final ContainerNode container = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                    .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).addChild(
-                        ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build()).build();
+        final ContainerNode container = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+                .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
+                .build())
+            .build();
 
         writeToStore(store, TestModel.TEST_PATH, container);
 
-        final YangInstanceIdentifier root = YangInstanceIdentifier.empty();
+        final YangInstanceIdentifier root = YangInstanceIdentifier.of();
         final NormalizedNode expected = readStore(store, root);
 
         final Snapshot snapshot = Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expected)),
@@ -643,6 +647,7 @@ public class ShardTest extends AbstractShardTest {
         verifyOuterListEntry(shard, 1);
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     @Test(expected = IllegalStateException.class)
     public void testBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Exception {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
@@ -669,6 +674,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testBatchedModificationsWithOperationFailure() {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         final TestActorRef<Shard> shard = actorFactory.createTestActor(
@@ -683,9 +689,10 @@ public class ShardTest extends AbstractShardTest {
 
         final TransactionIdentifier transactionID = nextTransactionId();
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+        final ContainerNode invalidData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+            .build();
 
         BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
         batched.addModification(new MergeModification(TestModel.TEST_PATH, invalidData));
@@ -757,6 +764,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testOnBatchedModificationsWhenNotLeader() {
         final AtomicBoolean overrideLeaderCalls = new AtomicBoolean();
         final ShardTestKit testKit = new ShardTestKit(getSystem());
@@ -797,6 +805,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testTransactionMessagesWithNoLeader() {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         dataStoreContextBuilder.customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
@@ -1185,7 +1194,7 @@ public class ShardTest extends AbstractShardTest {
         final Duration duration = Duration.ofSeconds(5);
         final TransactionIdentifier transactionID1 = nextTransactionId();
 
-        doThrow(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock canCommit failure"))
+        doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
         .doNothing().when(dataTree).validate(any(DataTreeModification.class));
 
         shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
@@ -1226,7 +1235,7 @@ public class ShardTest extends AbstractShardTest {
 
         ShardTestKit.waitUntilLeader(shard);
 
-        doThrow(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock canCommit failure"))
+        doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
         .doNothing().when(dataTree).validate(any(DataTreeModification.class));
 
         final Duration duration = Duration.ofSeconds(5);
@@ -1263,8 +1272,7 @@ public class ShardTest extends AbstractShardTest {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         final Creator<Shard> creator = () -> new Shard(newShardBuilder()) {
             @Override
-            void persistPayload(final Identifier id, final Payload payload,
-                    final boolean batchHint) {
+            void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
                 // Simulate an AbortTransaction message occurring during
                 // replication, after
                 // persisting and before finishing the commit to the
@@ -1739,7 +1747,7 @@ public class ShardTest extends AbstractShardTest {
         ShardTestKit.waitUntilLeader(shard);
         writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
-        final NormalizedNode expectedRoot = readStore(shard, YangInstanceIdentifier.empty());
+        final NormalizedNode expectedRoot = readStore(shard, YangInstanceIdentifier.of());
 
         // Trigger creation of a snapshot by ensuring
         final RaftActorContext raftActorContext = ((TestShard) shard.underlyingActor()).getRaftActorContext();
@@ -1764,8 +1772,8 @@ public class ShardTest extends AbstractShardTest {
     }
 
     private static void verifySnapshot(final Snapshot snapshot, final NormalizedNode expectedRoot) {
-        final NormalizedNode actual = ((ShardSnapshotState)snapshot.getState()).getSnapshot().getRootNode().get();
-        assertEquals("Root node", expectedRoot, actual);
+        assertEquals("Root node", expectedRoot,
+            ((ShardSnapshotState)snapshot.getState()).getSnapshot().getRootNode().orElseThrow());
     }
 
     /**
@@ -1782,16 +1790,16 @@ public class ShardTest extends AbstractShardTest {
         commitTransaction(store, putTransaction);
 
 
-        final NormalizedNode expected = readStore(store, YangInstanceIdentifier.empty());
+        final NormalizedNode expected = readStore(store, YangInstanceIdentifier.of());
 
         final DataTreeModification writeTransaction = store.takeSnapshot().newModification();
 
-        writeTransaction.delete(YangInstanceIdentifier.empty());
-        writeTransaction.write(YangInstanceIdentifier.empty(), expected);
+        writeTransaction.delete(YangInstanceIdentifier.of());
+        writeTransaction.write(YangInstanceIdentifier.of(), expected);
 
         commitTransaction(store, writeTransaction);
 
-        final NormalizedNode actual = readStore(store, YangInstanceIdentifier.empty());
+        final NormalizedNode actual = readStore(store, YangInstanceIdentifier.of());
 
         assertEquals(expected, actual);
     }
@@ -1856,9 +1864,9 @@ public class ShardTest extends AbstractShardTest {
 
         ShardLeaderStateChanged leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener,
             ShardLeaderStateChanged.class);
-        assertTrue("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
-        assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(),
-            leaderStateChanged.getLocalShardDataTree().get());
+        final var dataTree = leaderStateChanged.localShardDataTree();
+        assertNotNull("getLocalShardDataTree present", dataTree);
+        assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(), dataTree);
 
         MessageCollectorActor.clearMessages(listener);
 
@@ -1867,7 +1875,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new RequestVote(10000, "member2", 50, 50), testKit.getRef());
 
         leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener, ShardLeaderStateChanged.class);
-        assertFalse("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
+        assertNull("getLocalShardDataTree present", leaderStateChanged.localShardDataTree());
     }
 
     @Test
index b22ca277e507479ac86846c912f18e9f5d0615e9..515c0b5b92bd47af4df09dfcd6e60cb2e6093f38 100644
@@ -17,7 +17,6 @@ import akka.testkit.javadsl.EventFilter;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
@@ -48,10 +47,9 @@ public class ShardTestKit extends TestKit {
         for (int i = 0; i < 20 * 5; i++) {
             Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
             try {
-                final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
-                        .getLeaderActor();
+                final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
                 if (maybeLeader.isPresent()) {
-                    return maybeLeader.get();
+                    return maybeLeader.orElseThrow();
                 }
             } catch (TimeoutException e) {
                 LOG.trace("FindLeader timed out", e);
@@ -73,13 +71,12 @@ public class ShardTestKit extends TestKit {
         for (int i = 0; i < 20 * 5; i++) {
             Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
             try {
-                final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
-                        .getLeaderActor();
+                final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
                 if (!maybeLeader.isPresent()) {
                     return;
                 }
 
-                lastResponse = maybeLeader.get();
+                lastResponse = maybeLeader.orElseThrow();
             } catch (TimeoutException e) {
                 lastResponse = e;
             } catch (Exception e) {
index 1ca294db5846d5083e3dda09bfa0bee9bf8d858c..7a96f263c66a12de3c45179657667834bdb8d69c 100644
@@ -23,7 +23,7 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
@@ -34,6 +34,7 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Basheeruddin Ahmed
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardTransactionFailureTest extends AbstractActorTest {
     private static final EffectiveModelContext TEST_SCHEMA_CONTEXT = TestModel.createTestContext();
     private static final TransactionType RO = TransactionType.READ_ONLY;
@@ -73,12 +74,12 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeReadWithReadOnlyTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+                new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
-        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.empty(),
+        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
                 DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
@@ -95,12 +96,12 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeReadWithReadWriteTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+                new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
-        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.empty(),
+        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
                 DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
@@ -116,13 +117,13 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeExistsWithReadWriteTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+                new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
         future = akka.pattern.Patterns.ask(subject,
-                new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION), 3000);
+                new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
 }
index 5097197ff316834692a9f3a622751c975b0b2347..d80a5d1fc04b260f6907467ed365d4bb13b94e22 100644
@@ -47,13 +47,15 @@ import org.opendaylight.controller.cluster.datastore.modification.WriteModificat
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardTransactionTest extends AbstractActorTest {
 
     private static final TransactionType RO = TransactionType.READ_ONLY;
@@ -104,7 +106,7 @@ public class ShardTransactionTest extends AbstractActorTest {
     }
 
     private void testOnReceiveReadData(final ActorRef transaction) {
-        transaction.tell(new ReadData(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION),
+        transaction.tell(new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
             testKit.getRef());
 
         ReadDataReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ReadDataReply.class);
@@ -135,7 +137,7 @@ public class ShardTransactionTest extends AbstractActorTest {
     }
 
     private void testOnReceiveDataExistsPositive(final ActorRef transaction) {
-        transaction.tell(new DataExists(YangInstanceIdentifier.empty(), DataStoreVersions.CURRENT_VERSION),
+        transaction.tell(new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
             testKit.getRef());
 
         DataExistsReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), DataExistsReply.class);
@@ -166,14 +168,15 @@ public class ShardTransactionTest extends AbstractActorTest {
         final ActorRef transaction = newTransactionActor(RW, mockWriteTx, "testOnReceiveBatchedModifications");
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode mergeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME))
-                .build();
+        NormalizedNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
@@ -204,9 +207,10 @@ public class ShardTransactionTest extends AbstractActorTest {
         watcher.watch(transaction);
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         final TransactionIdentifier tx1 = nextTransactionId();
         BatchedModifications batched = new BatchedModifications(tx1, DataStoreVersions.CURRENT_VERSION);
@@ -235,9 +239,10 @@ public class ShardTransactionTest extends AbstractActorTest {
         watcher.watch(transaction);
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         BatchedModifications batched = new BatchedModifications(nextTransactionId(),
             DataStoreVersions.CURRENT_VERSION);
index 65cf2eac375ee5e3fd6b1ed912361ded25899f3c..b8390f9fcdbb9f19f2c9ddf91d780ca7db65bf6d 100644
@@ -26,12 +26,13 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
 
 /**
  * Unit tests for SimpleShardDataTreeCohort.
@@ -75,10 +76,10 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
         }).when(mockShardDataTree).startCanCommit(cohort);
 
         @SuppressWarnings("unchecked")
-        final FutureCallback<Void> callback = mock(FutureCallback.class);
+        final FutureCallback<Empty> callback = mock(FutureCallback.class);
         cohort.canCommit(callback);
 
-        verify(callback).onSuccess(null);
+        verify(callback).onSuccess(Empty.value());
         verifyNoMoreInteractions(callback);
     }
 
@@ -89,7 +90,7 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
         }).when(mockShardDataTree).startCanCommit(cohort);
 
         @SuppressWarnings("unchecked")
-        final FutureCallback<Void> callback = mock(FutureCallback.class);
+        final FutureCallback<Empty> callback = mock(FutureCallback.class);
         cohort.canCommit(callback);
 
         verify(callback).onFailure(cause);
@@ -98,12 +99,12 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
 
     @Test
     public void testCanCommitWithConflictingModEx() {
-        testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.empty(), "mock"));
+        testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.of(), "mock"));
     }
 
     @Test
     public void testCanCommitWithDataValidationEx() {
-        testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.empty(), "mock"));
+        testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock"));
     }
 
     @Test
@@ -209,11 +210,11 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
     }
 
     private static Future<?> abort(final ShardDataTreeCohort cohort) {
-        final CompletableFuture<Void> f = new CompletableFuture<>();
-        cohort.abort(new FutureCallback<Void>() {
+        final CompletableFuture<Empty> f = new CompletableFuture<>();
+        cohort.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
-                f.complete(null);
+            public void onSuccess(final Empty result) {
+                f.complete(result);
             }
 
             @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestDistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestDistributedDataStore.java
deleted file mode 100644
index 882d0e1..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
-import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-public class TestDistributedDataStore extends DistributedDataStore implements LocalShardStore {
-
-    public TestDistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
-                                    final Configuration configuration,
-                                    final DatastoreContextFactory datastoreContextFactory,
-                                    final DatastoreSnapshot restoreFromSnapshot) {
-        super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
-    }
-
-    TestDistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
-        super(actorUtils, identifier);
-    }
-
-    @Override
-    protected AbstractShardManagerCreator<?> getShardManagerCreator() {
-        return new TestShardManager.TestShardManagerCreator();
-    }
-
-    @Override
-    public TestShardManager.GetLocalShardsReply getLocalShards() {
-        TestShardManager.GetLocalShardsReply reply =
-            (TestShardManager.GetLocalShardsReply) getActorUtils()
-                .executeOperation(getActorUtils().getShardManager(), TestShardManager.GetLocalShards.INSTANCE);
-
-        return reply;
-    }
-}
@@ -17,12 +17,12 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
@@ -31,6 +31,9 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -38,9 +41,11 @@ import scala.concurrent.Future;
 /**
  * ThreePhaseCommitCohortProxy represents a set of remote cohort proxies.
  */
-public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<ActorSelection> {
-
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort {
     private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
+    private static final @NonNull ListenableFuture<Empty> IMMEDIATE_EMPTY_SUCCESS =
+        Futures.immediateFuture(Empty.value());
 
     private static final MessageSupplier COMMIT_MESSAGE_SUPPLIER = new MessageSupplier() {
         @Override
@@ -66,24 +71,47 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
     };
 
+    private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
+        @Override
+        public void run() {
+        }
+
+        @Override
+        public void success() {
+        }
+
+        @Override
+        public void failure() {
+        }
+
+        @Override
+        public void pause() {
+        }
+
+        @Override
+        public void resume() {
+        }
+    };
+
+
     private final ActorUtils actorUtils;
     private final List<CohortInfo> cohorts;
-    private final SettableFuture<Void> cohortsResolvedFuture = SettableFuture.create();
+    private final SettableFuture<Empty> cohortsResolvedFuture = SettableFuture.create();
     private final TransactionIdentifier transactionId;
     private volatile OperationCallback commitOperationCallback;
 
-    public ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
+    ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
             final TransactionIdentifier transactionId) {
         this.actorUtils = actorUtils;
         this.cohorts = cohorts;
         this.transactionId = requireNonNull(transactionId);
 
         if (cohorts.isEmpty()) {
-            cohortsResolvedFuture.set(null);
+            cohortsResolvedFuture.set(Empty.value());
         }
     }
 
-    private ListenableFuture<Void> resolveCohorts() {
+    private ListenableFuture<Empty> resolveCohorts() {
         if (cohortsResolvedFuture.isDone()) {
             return cohortsResolvedFuture;
         }
@@ -105,7 +133,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                             info.setResolvedActor(actor);
                             if (done) {
                                 LOG.debug("Tx {}: successfully resolved all cohort actors", transactionId);
-                                cohortsResolvedFuture.set(null);
+                                cohortsResolvedFuture.set(Empty.value());
                             }
                         }
                     }
@@ -128,9 +156,9 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         // extracted from ReadyTransactionReply messages by the Futures that were obtained earlier
         // and passed to us from upstream processing. If any one fails then  we'll fail canCommit.
 
-        Futures.addCallback(resolveCohorts(), new FutureCallback<Void>() {
+        Futures.addCallback(resolveCohorts(), new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void notUsed) {
+            public void onSuccess(final Empty result) {
                 finishCanCommit(returnFuture);
             }
 
@@ -143,8 +171,6 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         return returnFuture;
     }
 
-    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
-            justification = "https://github.com/spotbugs/spotbugs/issues/811")
     private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
         LOG.debug("Tx {} finishCanCommit", transactionId);
 
@@ -160,7 +186,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
 
         final Iterator<CohortInfo> iterator = cohorts.iterator();
 
-        final OnComplete<Object> onComplete = new OnComplete<Object>() {
+        final OnComplete<Object> onComplete = new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
@@ -195,7 +221,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                     sendCanCommitTransaction(iterator.next(), this);
                 } else {
                     LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-                    returnFuture.set(Boolean.valueOf(result));
+                    returnFuture.set(result);
                 }
 
             }
@@ -229,35 +255,34 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        // We don't need to do anything here - preCommit is done atomically with the commit phase
-        // by the shard.
-        return IMMEDIATE_VOID_SUCCESS;
+    public ListenableFuture<Empty> preCommit() {
+        // We don't need to do anything here - preCommit is done atomically with the commit phase by the shard.
+        return IMMEDIATE_EMPTY_SUCCESS;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         // Note - we pass false for propagateException. In the front-end data broker, this method
         // is called when one of the 3 phases fails with an exception. We'd rather have that
         // original exception propagated to the client. If our abort fails and we propagate the
         // exception then that exception will supersede and suppress the original exception. But
         // it's the original exception that is the root cause and of more interest to the client.
 
-        return voidOperation("abort", ABORT_MESSAGE_SUPPLIER,
-                AbortTransactionReply.class, false, OperationCallback.NO_OP_CALLBACK);
+        return operation("abort", Empty.value(), ABORT_MESSAGE_SUPPLIER, AbortTransactionReply.class, false,
+            NO_OP_CALLBACK);
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<? extends CommitInfo> commit() {
         OperationCallback operationCallback = commitOperationCallback != null ? commitOperationCallback :
-            OperationCallback.NO_OP_CALLBACK;
+            NO_OP_CALLBACK;
 
-        return voidOperation("commit", COMMIT_MESSAGE_SUPPLIER,
-                CommitTransactionReply.class, true, operationCallback);
+        return operation("commit", CommitInfo.empty(), COMMIT_MESSAGE_SUPPLIER, CommitTransactionReply.class, true,
+            operationCallback);
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private static boolean successfulFuture(final ListenableFuture<Void> future) {
+    private static boolean successfulFuture(final ListenableFuture<?> future) {
         if (!future.isDone()) {
             return false;
         }
@@ -270,26 +295,26 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
     }
 
-    private ListenableFuture<Void> voidOperation(final String operationName,
+    private <T> ListenableFuture<T> operation(final String operationName, final T futureValue,
             final MessageSupplier messageSupplier, final Class<?> expectedResponseClass,
             final boolean propagateException, final OperationCallback callback) {
         LOG.debug("Tx {} {}", transactionId, operationName);
 
-        final SettableFuture<Void> returnFuture = SettableFuture.create();
+        final SettableFuture<T> returnFuture = SettableFuture.create();
 
         // The cohort actor list should already be built at this point by the canCommit phase but,
         // if not for some reason, we'll try to build it here.
 
-        ListenableFuture<Void> future = resolveCohorts();
+        ListenableFuture<Empty> future = resolveCohorts();
         if (successfulFuture(future)) {
-            finishVoidOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
-                    returnFuture, callback);
+            finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException, returnFuture,
+                futureValue, callback);
         } else {
-            Futures.addCallback(future, new FutureCallback<Void>() {
+            Futures.addCallback(future, new FutureCallback<>() {
                 @Override
-                public void onSuccess(final Void notUsed) {
-                    finishVoidOperation(operationName, messageSupplier, expectedResponseClass,
-                            propagateException, returnFuture, callback);
+                public void onSuccess(final Empty result) {
+                    finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
+                        returnFuture, futureValue, callback);
                 }
 
                 @Override
@@ -299,7 +324,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                     if (propagateException) {
                         returnFuture.setException(failure);
                     } else {
-                        returnFuture.set(null);
+                        returnFuture.set(futureValue);
                     }
                 }
             }, MoreExecutors.directExecutor());
@@ -308,9 +333,10 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         return returnFuture;
     }
 
-    private void finishVoidOperation(final String operationName, final MessageSupplier messageSupplier,
+    private <T> void finishOperation(final String operationName, final MessageSupplier messageSupplier,
                                      final Class<?> expectedResponseClass, final boolean propagateException,
-                                     final SettableFuture<Void> returnFuture, final OperationCallback callback) {
+                                     final SettableFuture<T> returnFuture, final T futureValue,
+                                     final OperationCallback callback) {
         LOG.debug("Tx {} finish {}", transactionId, operationName);
 
         callback.resume();
@@ -341,14 +367,14 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                         // Since the caller doesn't want us to propagate the exception we'll also
                         // not log it normally. But it's usually not good to totally silence
                         // exceptions so we'll log it to debug level.
-                        returnFuture.set(null);
+                        returnFuture.set(futureValue);
                     }
 
                     callback.failure();
                 } else {
                     LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
 
-                    returnFuture.set(null);
+                    returnFuture.set(futureValue);
 
                     callback.success();
                 }
@@ -356,16 +382,6 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }, actorUtils.getClientDispatcher());
     }
 
-    @Override
-    List<Future<ActorSelection>> getCohortFutures() {
-        List<Future<ActorSelection>> cohortFutures = new ArrayList<>(cohorts.size());
-        for (CohortInfo info: cohorts) {
-            cohortFutures.add(info.getActorFuture());
-        }
-
-        return cohortFutures;
-    }
-
     static class CohortInfo {
         private final Future<ActorSelection> actorFuture;
         private final Supplier<Short> actorVersionSupplier;
index 45319712ae87bfd3c30cc9704bddf66d4e25fea7..e2b3872d864bc9b1f9b542efe9a5596dfbb8e4dd 100644
@@ -51,6 +51,7 @@ import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutur
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
     static class TestException extends RuntimeException {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java
deleted file mode 100644 (file)
index 37cdc4a..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.util.Timeout;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Promise;
-
-public class TransactionChainProxyTest extends AbstractTransactionProxyTest {
-    private LocalHistoryIdentifier historyId;
-
-    @Override
-    public void setUp() {
-        super.setUp();
-        historyId = MockIdentifiers.historyIdentifier(TransactionChainProxyTest.class, memberName);
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewReadOnlyTransaction() {
-
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadOnlyTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreReadTransaction);
-
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewReadWriteTransaction() {
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadWriteTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreReadWriteTransaction);
-
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewWriteOnlyTransaction() {
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newWriteOnlyTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreWriteTransaction);
-
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testClose() {
-        new TransactionChainProxy(mockComponentFactory, historyId).close();
-
-        verify(mockActorContext, times(1)).broadcast(any(Function.class), any(Class.class));
-    }
-
-    @Test
-    public void testRateLimitingUsedInReadWriteTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newReadWriteTransaction();
-
-            verify(mockActorContext, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingUsedInWriteOnlyTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newWriteOnlyTransaction();
-
-            verify(mockActorContext, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingNotUsedInReadOnlyTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newReadOnlyTransaction();
-
-            verify(mockActorContext, times(0)).acquireTxCreationPermit();
-        }
-    }
-
-    /**
-     * Tests 2 successive chained write-only transactions and verifies the second transaction isn't
-     * initiated until the first one completes its ready future.
-     */
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testChainedWriteOnlyTransactions() throws Exception {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            ActorRef txActorRef1 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
-            Promise<Object> batchedReplyPromise1 = akka.dispatch.Futures.promise();
-            doReturn(batchedReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
-            NormalizedNode writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            writeTx1.ready();
-
-            verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-            verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
-            ActorRef txActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
-            expectBatchedModifications(txActorRef2, 1);
-
-            final NormalizedNode writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-            final DOMStoreWriteTransaction writeTx2 = txChainProxy.newWriteOnlyTransaction();
-
-            final AtomicReference<Exception> caughtEx = new AtomicReference<>();
-            final CountDownLatch write2Complete = new CountDownLatch(1);
-            new Thread(() -> {
-                try {
-                    writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
-                } catch (Exception e) {
-                    caughtEx.set(e);
-                } finally {
-                    write2Complete.countDown();
-                }
-            }).start();
-
-            assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
-            if (caughtEx.get() != null) {
-                throw caughtEx.get();
-            }
-
-            try {
-                verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-            } catch (AssertionError e) {
-                fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
-            }
-
-            batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
-            // Tx 2 should've proceeded to find the primary shard.
-            verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(
-                    eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-    }
-
-    /**
-     * Tests 2 successive chained read-write transactions and verifies the second transaction isn't
-     * initiated until the first one completes its ready future.
-     */
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testChainedReadWriteTransactions() throws Exception {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            ActorRef txActorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-            expectBatchedModifications(txActorRef1, 1);
-
-            Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
-            doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
-
-            NormalizedNode writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            writeTx1.ready();
-
-            verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
-            String tx2MemberName = "mock-member";
-            ActorRef shardActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-            ActorRef txActorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE,
-                    DataStoreVersions.CURRENT_VERSION, tx2MemberName, shardActorRef2);
-
-            expectBatchedModifications(txActorRef2, 1);
-
-            final NormalizedNode writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-            final DOMStoreWriteTransaction writeTx2 = txChainProxy.newReadWriteTransaction();
-
-            final AtomicReference<Exception> caughtEx = new AtomicReference<>();
-            final CountDownLatch write2Complete = new CountDownLatch(1);
-            new Thread(() -> {
-                try {
-                    writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
-                } catch (Exception e) {
-                    caughtEx.set(e);
-                } finally {
-                    write2Complete.countDown();
-                }
-            }).start();
-
-            assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
-            if (caughtEx.get() != null) {
-                throw caughtEx.get();
-            }
-
-            try {
-                verify(mockActorContext, never()).executeOperationAsync(
-                        eq(getSystem().actorSelection(shardActorRef2.path())),
-                        eqCreateTransaction(tx2MemberName, READ_WRITE));
-            } catch (AssertionError e) {
-                fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
-            }
-
-            readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
-            verify(mockActorContext, timeout(5000)).executeOperationAsync(
-                    eq(getSystem().actorSelection(shardActorRef2.path())),
-                    eqCreateTransaction(tx2MemberName, READ_WRITE), any(Timeout.class));
-        }
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testChainedWriteTransactionsWithPreviousTxNotReady() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        expectBatchedModifications(actorRef, 1);
-
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
-            NormalizedNode writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            txChainProxy.newWriteOnlyTransaction();
-        }
-    }
-}
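The two chained-transaction tests deleted above both verify the invariant described in their Javadoc: the second transaction in a chain is not initiated until the first transaction's ready future completes. A minimal, hedged sketch of that gating idea, assuming plain java.util.concurrent types rather than the mocked Akka futures the tests use; SimpleTxChain and its methods are hypothetical names, not the TransactionChainProxy API:

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

final class SimpleTxChain {
    // Ready future of the most recently readied transaction; completed from the start so the
    // first transaction in the chain is never delayed.
    private CompletableFuture<Void> previousReady = CompletableFuture.completedFuture(null);

    // The next transaction only starts once the previously recorded ready future completes.
    synchronized <T> CompletableFuture<T> newTransaction(final Supplier<T> startTx) {
        return previousReady.thenApply(ignored -> startTx.get());
    }

    // Record the ready future of the transaction that was just readied; it gates the next one.
    synchronized void readied(final CompletableFuture<Void> readyFuture) {
        previousReady = readyFuture;
    }
}

Under this shape, work scheduled through newTransaction only runs once the ready future recorded by readied completes, which is the ordering the deleted tests assert with a CountDownLatch and the subsequent verify calls.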
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java
deleted file mode 100644 (file)
index ce1d6f6..0000000
+++ /dev/null
@@ -1,1528 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_ONLY;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.util.Timeout;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableSortedSet;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
-import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
-import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
-import scala.concurrent.Promise;
-
-@SuppressWarnings({"resource", "checkstyle:IllegalThrows", "checkstyle:AvoidHidingCauseException"})
-public class TransactionProxyTest extends AbstractTransactionProxyTest {
-
-    @SuppressWarnings("serial")
-    static class TestException extends RuntimeException {
-    }
-
-    interface Invoker {
-        FluentFuture<?> invoke(TransactionProxy proxy);
-    }
-
-    @Test
-    public void testRead() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        Optional<NormalizedNode> readOptional = transactionProxy.read(
-                TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertFalse("NormalizedNode isPresent", readOptional.isPresent());
-
-        NormalizedNode expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
-        assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithInvalidReplyMessageType() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        try {
-            transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWithAsyncRemoteOperatonFailure() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.failed(new TestException())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
-    }
-
-    private void testExceptionOnInitialCreateTransaction(final Exception exToThrow, final Invoker invoker)
-            throws Throwable {
-        ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        if (exToThrow instanceof PrimaryNotFoundException) {
-            doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
-        } else {
-            doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
-                    .findPrimaryShardAsync(anyString());
-        }
-
-        doReturn(Futures.failed(exToThrow)).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), any(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(invoker.invoke(transactionProxy));
-    }
-
-    private void testReadWithExceptionOnInitialCreateTransaction(final Exception exToThrow) throws Throwable {
-        testExceptionOnInitialCreateTransaction(exToThrow, proxy -> proxy.read(TestModel.TEST_PATH));
-    }
-
-    @Test(expected = PrimaryNotFoundException.class)
-    public void testReadWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"));
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWhenATimeoutExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new TimeoutException("test",
-                new TestException()));
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWhenAnyOtherExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new TestException());
-    }
-
-    @Test
-    public void testReadWithPriorRecordingOperationSuccessful() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, expectedNode);
-
-        Optional<NormalizedNode> readOptional = transactionProxy.read(
-                TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testReadPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-        transactionProxy.read(TestModel.TEST_PATH);
-    }
-
-    @Test(expected = IllegalArgumentException.class)
-    public void testInvalidCreateTransactionReply() throws Throwable {
-        ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext)
-                .actorSelection(actorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
-            eq(getSystem().actorSelection(actorRef.path())), eqCreateTransaction(memberName, READ_ONLY),
-            any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
-    }
-
-    @Test
-    public void testExists() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        doReturn(dataExistsReply(false)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.FALSE, exists);
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.TRUE, exists);
-    }
-
-    @Test(expected = PrimaryNotFoundException.class)
-    public void testExistsWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
-        testExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"),
-            proxy -> proxy.exists(TestModel.TEST_PATH));
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testExistsWithInvalidReplyMessageType() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        try {
-            transactionProxy.exists(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    @Test(expected = TestException.class)
-    public void testExistsWithAsyncRemoteOperatonFailure() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.failed(new TestException())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
-    }
-
-    @Test
-    public void testExistsWithPriorRecordingOperationSuccessful() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.TRUE, exists);
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testExistsPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-        transactionProxy.exists(TestModel.TEST_PATH);
-    }
-
-    @Test
-    public void testWrite() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testWriteAfterAsyncRead() throws Exception {
-        ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem(),
-                DefaultShardStrategy.DEFAULT_SHARD);
-
-        Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
-        doReturn(createTxPromise).when(mockActorContext).executeOperationAsync(
-                eq(getSystem().actorSelection(actorRef.path())),
-                eqCreateTransaction(memberName, READ_WRITE), any(Timeout.class));
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef);
-
-        final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        final TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        final CountDownLatch readComplete = new CountDownLatch(1);
-        final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
-        com.google.common.util.concurrent.Futures.addCallback(transactionProxy.read(TestModel.TEST_PATH),
-                new  FutureCallback<Optional<NormalizedNode>>() {
-                    @Override
-                    public void onSuccess(final Optional<NormalizedNode> result) {
-                        try {
-                            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-                        } catch (Exception e) {
-                            caughtEx.set(e);
-                        } finally {
-                            readComplete.countDown();
-                        }
-                    }
-
-                    @Override
-                    public void onFailure(final Throwable failure) {
-                        caughtEx.set(failure);
-                        readComplete.countDown();
-                    }
-                }, MoreExecutors.directExecutor());
-
-        createTxPromise.success(createTransactionReply(actorRef, DataStoreVersions.CURRENT_VERSION));
-
-        Uninterruptibles.awaitUninterruptibly(readComplete, 5, TimeUnit.SECONDS);
-
-        final Throwable t = caughtEx.get();
-        if (t != null) {
-            Throwables.propagateIfPossible(t, Exception.class);
-            throw new RuntimeException(t);
-        }
-
-        // This sends the batched modification.
-        transactionProxy.ready();
-
-        verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testWritePreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testWriteAfterReadyPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.ready();
-
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-    }
-
-    @Test
-    public void testMerge() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite), false);
-    }
-
-    @Test
-    public void testDelete() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH), false);
-    }
-
-    @Test
-    public void testReadWrite() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-    }
-
-    @Test
-    public void testReadyWithReadWrite() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
-        assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
-    }
-
-    @Test
-    public void testReadyWithNoModifications() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true);
-    }
-
-    @Test
-    public void testReadyWithMultipleShardWrites() {
-        ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
-                TestModel.JUNK_QNAME.getLocalName());
-
-        expectBatchedModificationsReady(actorRef1);
-        expectBatchedModificationsReady(actorRef2);
-
-        ActorRef actorRef3 = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(actorRef3.path())).when(mockActorContext)
-                .actorSelection(actorRef3.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(actorRef3, createDataTree()))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(CarsModel.BASE_QNAME.getLocalName()));
-
-        expectReadyLocalTransaction(actorRef3, false);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-        transactionProxy.write(CarsModel.BASE_PATH, ImmutableNodes.containerNode(CarsModel.BASE_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
-        verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef1),
-                actorSelection(actorRef2), actorSelection(actorRef3));
-
-        SortedSet<String> expShardNames =
-                ImmutableSortedSet.of(DefaultShardStrategy.DEFAULT_SHARD,
-                        TestModel.JUNK_QNAME.getLocalName(), CarsModel.BASE_QNAME.getLocalName());
-
-        ArgumentCaptor<BatchedModifications> batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef1)), batchedMods.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
-        batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef2)), batchedMods.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
-        ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef3)), readyLocalTx.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, readyLocalTx.getValue().getParticipatingShardNames().get());
-    }
-
-    @Test
-    public void testReadyWithWriteOnlyAndLastBatchPending() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-    }
-
-    @Test
-    public void testReadyWithWriteOnlyAndLastBatchEmpty() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
-        verifyBatchedModifications(batchedModifications.get(1), true, true);
-    }
-
-    @Test
-    public void testReadyWithReplyFailure() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectFailedBatchedModifications(actorRef);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, TestException.class);
-    }
-
-    @Test
-    public void testReadyWithDebugContextEnabled() {
-        dataStoreContextBuilder.transactionDebugContextEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.merge(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof DebugThreePhaseCommitCohort);
-
-        verifyCohortFutures((DebugThreePhaseCommitCohort)ready, new CommitTransactionReply().toSerializable());
-    }
-
-    @Test
-    public void testReadyWithLocalTransaction() {
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, createDataTree()))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        expectReadyLocalTransaction(shardActorRef, true);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(shardActorRef)), readyLocalTx.capture(), any(Timeout.class));
-        assertFalse("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
-    }
-
-    @Test
-    public void testReadyWithLocalTransactionWithFailure() {
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        DataTree mockDataTree = createDataTree();
-        DataTreeModification mockModification = mockDataTree.takeSnapshot().newModification();
-        doThrow(new RuntimeException("mock")).when(mockModification).ready();
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, mockDataTree))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        expectReadyLocalTransaction(shardActorRef, true);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-        verifyCohortFutures((SingleCommitCohortProxy)ready, RuntimeException.class);
-    }
-
-    private void testWriteOnlyTxWithFindPrimaryShardFailure(final Exception toThrow) {
-        doReturn(Futures.failed(toThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, toThrow.getClass());
-    }
-
-    @Test
-    public void testWriteOnlyTxWithPrimaryNotFoundException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new PrimaryNotFoundException("mock"));
-    }
-
-    @Test
-    public void testWriteOnlyTxWithNotInitializedException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new NotInitializedException("mock"));
-    }
-
-    @Test
-    public void testWriteOnlyTxWithNoShardLeaderException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new NoShardLeaderException("mock"));
-    }
-
-    @Test
-    public void testReadyWithInvalidReplyMessageType() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-        ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
-                TestModel.JUNK_QNAME.getLocalName());
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef2);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
-        verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef2),
-                IllegalArgumentException.class);
-    }
-
-    @Test
-    public void testGetIdentifier() {
-        setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        Object id = transactionProxy.getIdentifier();
-        assertNotNull("getIdentifier returned null", id);
-        assertTrue("Invalid identifier: " + id, id.toString().contains(memberName));
-    }
-
-    @Test
-    public void testClose() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.close();
-
-        verify(mockActorContext).sendOperationAsync(
-                eq(actorSelection(actorRef)), isA(CloseTransaction.class));
-    }
-
-    private interface TransactionProxyOperation {
-        void run(TransactionProxy transactionProxy);
-    }
-
-    private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef) {
-        return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION);
-    }
-
-    private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef, final DataTree dataTree) {
-        return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION,
-                dataTree);
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation) {
-        throttleOperation(operation, 1, true);
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
-            final boolean shardFound) {
-        throttleOperation(operation, outstandingOpsLimit, shardFound, TimeUnit.MILLISECONDS.toNanos(
-                mockActorContext.getDatastoreContext().getOperationTimeoutInMillis()));
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
-            final boolean shardFound, final long expectedCompletionTime) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        // Note that we're setting batchedModificationCount to one less than what we need because
-        // TransactionProxy now allows one extra permit for ready
-        doReturn(dataStoreContextBuilder.operationTimeoutInSeconds(2)
-                .shardBatchedModificationCount(outstandingOpsLimit - 1).build()).when(mockActorContext)
-                        .getDatastoreContext();
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        if (shardFound) {
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq("cars"));
-
-        } else {
-            doReturn(Futures.failed(new Exception("not found")))
-                    .when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-
-        doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
-                any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
-                expectedCompletionTime, end - start),
-                end - start > expectedCompletionTime && end - start < expectedCompletionTime * 2);
-
-    }
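// Hedged sketch, prompted by the permit comment in throttleOperation() above: the proxy's rate
// limiting effectively grants one permit per outstanding batched modification plus one extra
// permit reserved for ready(), which is why the test configures shardBatchedModificationCount
// to outstandingOpsLimit - 1. This is NOT the actual TransactionProxy limiter; TxPermitLimiter
// and its methods are hypothetical illustration only.
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

final class TxPermitLimiter {
    private final Semaphore permits;

    TxPermitLimiter(final int batchedModificationCount) {
        // One permit per in-flight batched modification plus one reserved for the ready() call.
        permits = new Semaphore(batchedModificationCount + 1);
    }

    // Blocks for up to the operation timeout when all permits are taken, producing the
    // elapsed-time behaviour the throttling assertions above measure.
    boolean acquire(final long operationTimeoutMillis) throws InterruptedException {
        return permits.tryAcquire(operationTimeoutMillis, TimeUnit.MILLISECONDS);
    }

    void release() {
        permits.release();
    }
}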
-
-    private void completeOperation(final TransactionProxyOperation operation) {
-        completeOperation(operation, true);
-    }
-
-    private void completeOperation(final TransactionProxyOperation operation, final boolean shardFound) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        if (shardFound) {
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        } else {
-            doReturn(Futures.failed(new PrimaryNotFoundException("test"))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-
-        ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-        String actorPath = txActorRef.path().toString();
-        CreateTransactionReply createTransactionReply = new CreateTransactionReply(actorPath, nextTransactionId(),
-                DataStoreVersions.CURRENT_VERSION);
-
-        doReturn(actorSystem.actorSelection(actorPath)).when(mockActorContext).actorSelection(actorPath);
-
-        doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).executeOperationAsync(
-                eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
-                any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis());
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
-                expected, end - start), end - start <= expected);
-    }
-
-    private void completeOperationLocal(final TransactionProxyOperation operation, final DataTree dataTree) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, dataTree))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis());
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s", expected, end - start),
-                end - start <= expected);
-    }
-
-    private static DataTree createDataTree() {
-        DataTree dataTree = mock(DataTree.class);
-        DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
-        DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
-        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
-        doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
-
-        return dataTree;
-    }
-
-    private static DataTree createDataTree(final NormalizedNode readResponse) {
-        DataTree dataTree = mock(DataTree.class);
-        DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
-        DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
-        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
-        doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
-        doReturn(Optional.of(readResponse)).when(dataTreeModification).readNode(any(YangInstanceIdentifier.class));
-
-        return dataTree;
-    }
-
-
-    @Test
-    public void testWriteCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        }, createDataTree());
-    }
-
-    @Test
-    public void testWriteThrottlingWhenShardFound() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        });
-    }
-
-    @Test
-    public void testWriteThrottlingWhenShardNotFound() {
-        // Confirm that there is no throttling when the Shard is not found
-        completeOperation(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        }, false);
-
-    }
-
-
-    @Test
-    public void testWriteCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        });
-    }
-
-    @Test
-    public void testMergeThrottlingWhenShardFound() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        });
-    }
-
-    @Test
-    public void testMergeThrottlingWhenShardNotFound() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        }, false);
-    }
-
-    @Test
-    public void testMergeCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        });
-
-    }
-
-    @Test
-    public void testMergeCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        }, createDataTree());
-    }
-
-
-    @Test
-    public void testDeleteThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        });
-    }
-
-
-    @Test
-    public void testDeleteThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            expectBatchedModifications(2);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        }, false);
-    }
-
-    @Test
-    public void testDeleteCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testDeleteCompletion() {
-        completeOperation(transactionProxy -> {
-            expectBatchedModifications(2);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testReadThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData());
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        });
-    }
-
-    @Test
-    public void testReadThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData());
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, false);
-    }
-
-
-    @Test
-    public void testReadCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            doReturn(readDataReply(nodeToRead)).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData(), any(Timeout.class));
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testReadCompletionForLocalShard() {
-        final NormalizedNode nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, createDataTree(nodeToRead));
-
-    }
-
-    @Test
-    public void testReadCompletionForLocalShardWhenExceptionOccurs() {
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testExistsThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists());
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        });
-    }
-
-    @Test
-    public void testExistsThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists());
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, false);
-    }
-
-
-    @Test
-    public void testExistsCompletion() {
-        completeOperation(transactionProxy -> {
-            doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists(), any(Timeout.class));
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testExistsCompletionForLocalShard() {
-        final NormalizedNode nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, createDataTree(nodeToRead));
-
-    }
-
-    @Test
-    public void testExistsCompletionForLocalShardWhenExceptionOccurs() {
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testReadyThrottling() {
-
-        throttleOperation(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(1);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.ready();
-        });
-    }
-
-    @Test
-    public void testReadyThrottlingWithTwoTransactionContexts() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            NormalizedNode carsNode = ImmutableNodes.containerNode(CarsModel.BASE_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            // Trying to write to Cars will cause another transaction context to get created
-            transactionProxy.write(CarsModel.BASE_PATH, carsNode);
-
-            // Now ready should block for both transaction contexts
-            transactionProxy.ready();
-        }, 1, true, TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis()) * 2);
-    }
-
-    private void testModificationOperationBatching(final TransactionType type) {
-        int shardBatchedModificationCount = 3;
-        dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), type);
-
-        expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
-        YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
-        NormalizedNode writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        YangInstanceIdentifier writePath3 = TestModel.INNER_LIST_PATH;
-        NormalizedNode writeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
-        NormalizedNode mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier mergePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode mergeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        YangInstanceIdentifier mergePath3 = TestModel.INNER_LIST_PATH;
-        NormalizedNode mergeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
-        YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, type);
-
-        transactionProxy.write(writePath1, writeNode1);
-        transactionProxy.write(writePath2, writeNode2);
-        transactionProxy.delete(deletePath1);
-        transactionProxy.merge(mergePath1, mergeNode1);
-        transactionProxy.merge(mergePath2, mergeNode2);
-        transactionProxy.write(writePath3, writeNode3);
-        transactionProxy.merge(mergePath3, mergeNode3);
-        transactionProxy.delete(deletePath2);
-
-        // This sends the last batch.
-        transactionProxy.ready();
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
-                new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
-
-        verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
-                new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
-
-        verifyBatchedModifications(batchedModifications.get(2), true, true,
-                new MergeModification(mergePath3, mergeNode3), new DeleteModification(deletePath2));
-
-        assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
-    }
-
-    @Test
-    public void testReadWriteModificationOperationBatching() {
-        testModificationOperationBatching(READ_WRITE);
-    }
-
-    @Test
-    public void testWriteOnlyModificationOperationBatching() {
-        testModificationOperationBatching(WRITE_ONLY);
-    }
-
-    @Test
-    public void testOptimizedWriteOnlyModificationOperationBatching() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-        testModificationOperationBatching(WRITE_ONLY);
-    }
-
-    @Test
-    public void testModificationOperationBatchingWithInterleavedReads() throws Exception {
-
-        int shardBatchedModificationCount = 10;
-        dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
-        final YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
-        final NormalizedNode writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        final YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
-        final NormalizedNode mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier mergePath2 = TestModel.INNER_LIST_PATH;
-        NormalizedNode mergeNode2 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        final YangInstanceIdentifier deletePath = TestModel.OUTER_LIST_PATH;
-
-        doReturn(readDataReply(writeNode2)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
-        doReturn(readDataReply(mergeNode2)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(writePath1, writeNode1);
-        transactionProxy.write(writePath2, writeNode2);
-
-        Optional<NormalizedNode> readOptional = transactionProxy.read(writePath2).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", writeNode2, readOptional.get());
-
-        transactionProxy.merge(mergePath1, mergeNode1);
-        transactionProxy.merge(mergePath2, mergeNode2);
-
-        readOptional = transactionProxy.read(mergePath2).get(5, TimeUnit.SECONDS);
-
-        transactionProxy.delete(deletePath);
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-        assertEquals("Exists response", Boolean.TRUE, exists);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", mergeNode2, readOptional.get());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
-                new WriteModification(writePath2, writeNode2));
-
-        verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
-                new MergeModification(mergePath2, mergeNode2));
-
-        verifyBatchedModifications(batchedModifications.get(2), false, new DeleteModification(deletePath));
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-    }
-
-    @Test
-    public void testReadRoot() throws InterruptedException, ExecutionException, java.util.concurrent.TimeoutException {
-        EffectiveModelContext schemaContext = SchemaContextHelper.full();
-        Configuration configuration = mock(Configuration.class);
-        doReturn(configuration).when(mockActorContext).getConfiguration();
-        doReturn(schemaContext).when(mockActorContext).getSchemaContext();
-        doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
-
-        NormalizedNode expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        NormalizedNode expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
-        setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
-        setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
-
-        doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
-
-        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        Optional<NormalizedNode> readOptional = transactionProxy.read(
-                YangInstanceIdentifier.empty()).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
-        NormalizedNode normalizedNode = readOptional.get();
-
-        assertTrue("Expect value to be a Collection", normalizedNode.body() instanceof Collection);
-
-        @SuppressWarnings("unchecked")
-        Collection<NormalizedNode> collection = (Collection<NormalizedNode>) normalizedNode.body();
-
-        for (NormalizedNode node : collection) {
-            assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
-        }
-
-        assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
-                NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
-
-        assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
-
-        assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
-                NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
-
-        assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
-    }
-
-
-    private void setUpReadData(final String shardName, final NormalizedNode expectedNode) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(getSystem(), shardActorRef)).when(mockActorContext)
-                .findPrimaryShardAsync(eq(shardName));
-
-        ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(txActorRef.path())).when(mockActorContext)
-                .actorSelection(txActorRef.path().toString());
-
-        doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION)))
-                .when(mockActorContext).executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
-                        eqCreateTransaction(memberName, TransactionType.READ_ONLY), any(Timeout.class));
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(txActorRef)), eqReadData(YangInstanceIdentifier.empty()), any(Timeout.class));
-    }
-}
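
Note on the removed helpers above: throttleOperation() and completeOperation() both time operation.run() with System.nanoTime() and compare the elapsed time against the datastore's operation timeout; throttling is asserted as "blocked for at least one timeout period but less than two", completion as "returned within the timeout". A standalone sketch of that assertion pattern follows; the class and method names are illustrative only and are not part of the removed test.

public final class TimingAssertions {
    static void assertThrottled(final Runnable operation, final long timeoutNanos) {
        final long start = System.nanoTime();
        operation.run();
        final long elapsed = System.nanoTime() - start;
        // Mirrors throttleOperation(): the call blocked for roughly one timeout period.
        if (!(elapsed > timeoutNanos && elapsed < timeoutNanos * 2)) {
            throw new AssertionError(String.format(
                "Expected elapsed time: %s. Actual: %s", timeoutNanos, elapsed));
        }
    }

    static void assertCompletedWithin(final Runnable operation, final long timeoutNanos) {
        final long start = System.nanoTime();
        operation.run();
        final long elapsed = System.nanoTime() - start;
        // Mirrors completeOperation(): the call returned within the timeout.
        if (elapsed > timeoutNanos) {
            throw new AssertionError(String.format(
                "Expected elapsed time: %s. Actual: %s", timeoutNanos, elapsed));
        }
    }
}
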
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import com.codahale.metrics.Timer;
@@ -19,7 +18,8 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
  * TransactionRateLimitingCallback computes the new transaction rate limit on the successful completion of a
  * transaction.
  */
-public class TransactionRateLimitingCallback implements OperationCallback {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class TransactionRateLimitingCallback implements OperationCallback {
     private static Ticker TICKER = Ticker.systemTicker();
 
     private enum State {
@@ -33,7 +33,7 @@ public class TransactionRateLimitingCallback implements OperationCallback {
     private long elapsedTime;
     private volatile State state = State.STOPPED;
 
-    TransactionRateLimitingCallback(ActorUtils actorUtils) {
+    TransactionRateLimitingCallback(final ActorUtils actorUtils) {
         commitTimer = actorUtils.getOperationTimer(ActorUtils.COMMIT);
     }
 
@@ -75,7 +75,7 @@ public class TransactionRateLimitingCallback implements OperationCallback {
     }
 
     @VisibleForTesting
-    static void setTicker(Ticker ticker) {
+    static void setTicker(final Ticker ticker) {
         TICKER = ticker;
     }
 }
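
The callback above measures commit duration through a Guava Ticker rather than calling System.nanoTime() directly, and exposes a @VisibleForTesting setTicker() hook so tests can make the measurement deterministic. A standalone sketch of that injection pattern, with illustrative names (ElapsedTimer is not a project class):

import com.google.common.base.Ticker;

public final class ElapsedTimer {
    // Production code reads the system clock; tests may install a fake Ticker.
    private static Ticker ticker = Ticker.systemTicker();

    // Test-only hook, analogous to TransactionRateLimitingCallback.setTicker(...).
    static void setTicker(final Ticker newTicker) {
        ticker = newTicker;
    }

    public static long timeNanos(final Runnable task) {
        final long start = ticker.read();
        task.run();
        return ticker.read() - start;
    }

    public static void main(final String[] args) {
        System.out.println(timeNanos(() -> { }) + " ns");
    }
}
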
index 14783441b77e96ac837a784fbe2f923aa1ffbb86..b37dfb545064936ea4936671c8c07d93bf9f1122 100644 (file)
@@ -29,6 +29,7 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 @RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class TransactionRateLimitingCallbackTest {
     @Mock
index b69c6c86627b1218599fadf2bc1b50ea8107f3fa..0a5c40d29d3ae6b96f5765bfd549e10aff177a70 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.actors;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorRef;
 import akka.testkit.javadsl.TestKit;
@@ -40,7 +39,7 @@ public class ShardSnapshotActorTest extends AbstractActorTest {
         final ActorRef snapshotActor = getSystem().actorOf(ShardSnapshotActor.props(STREAM_FACTORY), testName);
         kit.watch(snapshotActor);
 
-        final NormalizedNode expectedRoot = snapshot.getRootNode().get();
+        final NormalizedNode expectedRoot = snapshot.getRootNode().orElseThrow();
 
         ByteArrayOutputStream installSnapshotStream = withInstallSnapshot ? new ByteArrayOutputStream() : null;
         ShardSnapshotActor.requestSnapshot(snapshotActor, snapshot,
@@ -59,10 +58,7 @@ public class ShardSnapshotActorTest extends AbstractActorTest {
             }
 
             assertEquals("Deserialized snapshot type", snapshot.getClass(), deserialized.getClass());
-
-            final Optional<NormalizedNode> maybeNode = deserialized.getRootNode();
-            assertTrue("isPresent", maybeNode.isPresent());
-            assertEquals("Root node", expectedRoot, maybeNode.get());
+            assertEquals("Root node", Optional.of(expectedRoot), deserialized.getRootNode());
         }
     }
 
index 49d33921636f204782946772c268b102d2629d69..1b033546211e21b1f64e854ac77083939a6adc71 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class AbortTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         AbortTransactionReply expected = AbortTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
index dd1d2be95b7dbee5dde86a6d5566bf421582f775..c0300e667b5c6fb4db67495740b8ee72e7821f7a 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.MockIdentifiers;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.MockIdentifiers;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class AbortTransactionTest {
-
     @Test
     public void testSerialization() {
         AbortTransaction expected = new AbortTransaction(
index 2cb5c81e9d0ea86022bf517d5695bd06b68c5244..69450b7efba6db3a70e8ab1aee2418374570aa08 100644 (file)
@@ -15,7 +15,7 @@ import com.google.common.collect.ImmutableSortedSet;
 import java.io.Serializable;
 import java.util.Optional;
 import java.util.SortedSet;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
@@ -25,27 +25,30 @@ import org.opendaylight.controller.cluster.datastore.modification.MergeModificat
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for BatchedModifications.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class BatchedModificationsTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
index 21c4d2f673d97fd4018e7eaecca88022282ad527..5ebc282cebe0f55ec2ff470458ea51e3ae52cb25 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CanCommitTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         testSerialization(CanCommitTransactionReply.yes(DataStoreVersions.CURRENT_VERSION),
@@ -31,7 +31,7 @@ public class CanCommitTransactionReplyTest {
                 CanCommitTransactionReply.class);
     }
 
-    private static void testSerialization(CanCommitTransactionReply expected, Class<?> expSerialized) {
+    private static void testSerialization(final CanCommitTransactionReply expected, final Class<?> expSerialized) {
         Object serialized = expected.toSerializable();
         assertEquals("Serialized type", expSerialized, serialized.getClass());
 
index 8950c50518b8ac2d0ef59c1d8d868fe819ec65c2..806c504c5219a9297cbbf932b85846a310e83092 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CanCommitTransactionTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CanCommitTransaction expected = new CanCommitTransaction(nextTransactionId(),
index 6f857112a4f5d8bf8834b79d66e3636c4f98b625..e18acec0d2c8c84931533806258209cead6327c7 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,17 +22,20 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionChainTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
-        CloseTransactionChain expected = new CloseTransactionChain(nextHistoryId(), DataStoreVersions.CURRENT_VERSION);
+        CloseTransactionChain expected = new CloseTransactionChain(newHistoryId(1), DataStoreVersions.CURRENT_VERSION);
 
-        Object serialized = expected.toSerializable();
+        var serialized = (Serializable) expected.toSerializable();
         assertEquals("Serialized type", CloseTransactionChain.class, serialized.getClass());
 
+        final byte[] bytes = SerializationUtils.serialize(serialized);
+        assertEquals(241, bytes.length);
+
         CloseTransactionChain actual = CloseTransactionChain.fromSerializable(
-                SerializationUtils.clone((Serializable) serialized));
+                SerializationUtils.deserialize(bytes));
         assertEquals("getIdentifier", expected.getIdentifier(), actual.getIdentifier());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
     }
index 8c35babbdaed6b4446e40c7ee8dc28e7477082e6..db53db4b89ec0a0ef7fe86e434b8e8a2f3412a01 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import static org.junit.Assert.assertEquals;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionTest {
     @Test
     public void testCloseTransactionSerialization() {
index 423411d8213ac6579b53cf611c5775b1c52465d0..1017772bd05c7a2eb6ca9a860ee0fa6c94543d35 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CommitTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         CommitTransactionReply expected = CommitTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
index 1fccfbdeae93d35b0b977fd4ac0b3ab3b0a48255..2ab6ca7b3a204c41900f72510130ab3c4933e423 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,6 +22,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CommitTransactionTest extends AbstractTest {
 
     @Test
index a44e71a1654993114ceaa3ace998e9d019a3daa9..5acacfe8b182587fca05202589f3f5a289b3843b 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CreateTransactionReplyTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CreateTransactionReply expected = new CreateTransactionReply("txPath", nextTransactionId(),
index 2e83a33e0ab2996b23be8c760532e5673900aea5..9d573e94883b9babc69831245d4957ffb7a0a7d1 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CreateTransactionTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CreateTransaction expected = new CreateTransaction(nextTransactionId(), 2, DataStoreVersions.CURRENT_VERSION);
index 8f33773210151cc0b0bee9e0a461dd90c650ae62..61eb8b16a13d6934609a6238357563ac72e29bef 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsReplyTest {
-
     @Test
     public void testSerialization() {
         DataExistsReply expected = new DataExistsReply(true, DataStoreVersions.CURRENT_VERSION);
index b55b3effd33eb8778cbb282d1ff08edef358f15e..bcd732215036152e8199ed79f28978e305ba4247 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsTest {
-
     @Test
     public void testSerialization() {
         DataExists expected = new DataExists(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
index f01b09c716bf3747165792648e7735965d3fe300..9c31e1909a8cbcd30f4cb57408d1604a1304d6e6 100644 (file)
@@ -12,27 +12,28 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for ReadDataReply.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataReplyTest {
 
     @Test
     public void testSerialization() {
-        NormalizedNode data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode data = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION);
 
index 5e06a60217b73361571d8756990671db6c89fe86..df692b59d5a83e3b7f3d3a5f577333049cb1efdf 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataTest {
-
     @Test
     public void testSerialization() {
         ReadData expected = new ReadData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
index fc09965ccc027b43b74f5b4cf84af987c5711c69..2abb05d360a088fe2c206d1f55f2f20d51565a7e 100644 (file)
@@ -28,19 +28,19 @@ import org.opendaylight.controller.cluster.datastore.modification.WriteModificat
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 /**
  * Unit tests for ReadyLocalTransactionSerializer.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyLocalTransactionSerializerTest extends AbstractTest {
-
     @Test
     public void testToAndFromBinary() throws NotSerializableException {
         DataTree dataTree = new InMemoryDataTreeFactory().create(
@@ -74,8 +74,7 @@ public class ReadyLocalTransactionSerializerTest extends AbstractTest {
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, batched.getVersion());
         assertTrue("isReady", batched.isReady());
         assertTrue("isDoCommitOnReady", batched.isDoCommitOnReady());
-        assertTrue("participatingShardNames present", batched.getParticipatingShardNames().isPresent());
-        assertEquals("participatingShardNames", shardNames, batched.getParticipatingShardNames().get());
+        assertEquals("participatingShardNames", Optional.of(shardNames), batched.getParticipatingShardNames());
 
         List<Modification> batchedMods = batched.getModifications();
         assertEquals("getModifications size", 2, batchedMods.size());
index 049c17cb437f635c293f91ec9515d727420b01b4..38eea9af5433ad59a8c5130671087ca21a6e0958 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import static org.junit.Assert.assertEquals;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyTransactionReplyTest {
 
     @Test
index f2527b13abb013b475f8eeb2286ec809a9debc18..9d11ea986a901b1279e24a696e41fb5174230e89 100644 (file)
@@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractModificationTest {
     private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
 
index c8b2c48ebb2366f689f74e735d4a431fe798af89..f1cb4870a3ba7c8fcce94846281ff37f7c66b222 100644 (file)
@@ -10,13 +10,14 @@ package org.opendaylight.controller.cluster.datastore.modification;
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DeleteModificationTest extends AbstractModificationTest {
     @Test
     public void testApply() throws Exception {
@@ -44,7 +45,7 @@ public class DeleteModificationTest extends AbstractModificationTest {
 
         DeleteModification expected = new DeleteModification(path);
 
-        DeleteModification clone = (DeleteModification) SerializationUtils.clone(expected);
+        DeleteModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
     }
 }
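
The import swap repeated throughout these test diffs, org.apache.commons.lang.SerializationUtils to org.apache.commons.lang3.SerializationUtils, is also why the explicit casts around clone() disappear: commons-lang 2.x declares clone(Serializable) returning Object, while lang3 declares it as <T extends Serializable> T clone(T). A minimal standalone example, with ArrayList standing in for the modification classes used above:

import java.util.ArrayList;
import org.apache.commons.lang3.SerializationUtils;

public final class CloneDemo {
    public static void main(final String[] args) {
        final ArrayList<String> original = new ArrayList<>();
        original.add("foo");
        // lang3 infers the return type from the argument, so no cast is needed.
        final ArrayList<String> copy = SerializationUtils.clone(original);
        System.out.println(copy);
    }
}
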
index f5ada9ffe37f79f444c41ccc4a0eb2063b8d5858..ff22645f92054c29a28af0b2f7a2368b00917c74 100644 (file)
@@ -10,15 +10,15 @@ package org.opendaylight.controller.cluster.datastore.modification;
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MergeModificationTest extends AbstractModificationTest {
     @Test
     public void testApply() throws Exception {
@@ -37,14 +37,12 @@ public class MergeModificationTest extends AbstractModificationTest {
 
     @Test
     public void testSerialization() {
-        YangInstanceIdentifier path = TestModel.TEST_PATH;
-        NormalizedNode data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        MergeModification expected = new MergeModification(TestModel.TEST_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
 
-        MergeModification expected = new MergeModification(path, data);
-
-        MergeModification clone = (MergeModification) SerializationUtils.clone(expected);
+        MergeModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
         assertEquals("getData", expected.getData(), clone.getData());
     }
index 35ee8fc5bb61077377c6d580efc30d18c75e50b3..3d58db873d99878c3412626c6efc6331ff2469cf 100644 (file)
@@ -9,16 +9,18 @@ package org.opendaylight.controller.cluster.datastore.modification;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MutableCompositeModificationTest extends AbstractModificationTest {
     @Test
     public void testApply() throws Exception {
@@ -30,24 +32,26 @@ public class MutableCompositeModificationTest extends AbstractModificationTest {
         compositeModification.apply(transaction);
         commitTransaction(transaction);
 
-        assertEquals(TestModel.TEST_QNAME, readData(TestModel.TEST_PATH).get().getIdentifier().getNodeType());
+        assertEquals(TestModel.TEST_QNAME, readData(TestModel.TEST_PATH).orElseThrow().name().getNodeType());
     }
 
     @Test
     public void testSerialization() {
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
         MutableCompositeModification compositeModification =
-            new MutableCompositeModification(DataStoreVersions.SODIUM_SR1_VERSION);
+            new MutableCompositeModification(DataStoreVersions.POTASSIUM_VERSION);
         compositeModification.addModification(new WriteModification(writePath, writeData));
         compositeModification.addModification(new MergeModification(mergePath, mergeData));
         compositeModification.addModification(new DeleteModification(deletePath));
@@ -56,35 +60,37 @@ public class MutableCompositeModificationTest extends AbstractModificationTest {
         assertEquals(360, bytes.length);
         MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.deserialize(bytes);
 
-        assertEquals("getVersion", DataStoreVersions.SODIUM_SR1_VERSION, clone.getVersion());
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, clone.getVersion());
 
         assertEquals("getModifications size", 3, clone.getModifications().size());
 
         WriteModification write = (WriteModification)clone.getModifications().get(0);
-        assertEquals("getVersion", DataStoreVersions.SODIUM_SR1_VERSION, write.getVersion());
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, write.getVersion());
         assertEquals("getPath", writePath, write.getPath());
         assertEquals("getData", writeData, write.getData());
 
         MergeModification merge = (MergeModification)clone.getModifications().get(1);
-        assertEquals("getVersion", DataStoreVersions.SODIUM_SR1_VERSION, merge.getVersion());
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, merge.getVersion());
         assertEquals("getPath", mergePath, merge.getPath());
         assertEquals("getData", mergeData, merge.getData());
 
         DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
-        assertEquals("getVersion", DataStoreVersions.SODIUM_SR1_VERSION, delete.getVersion());
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, delete.getVersion());
         assertEquals("getPath", deletePath, delete.getPath());
     }
 
     @Test
     public void testSerializationModern() {
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
index 416b9ae0ef95cc9063149ad6e8de23e1e1226e84..82c8f757e8571ce8bb8b75e3c02343b1331e6c3a 100644 (file)
@@ -10,15 +10,15 @@ package org.opendaylight.controller.cluster.datastore.modification;
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class WriteModificationTest extends AbstractModificationTest {
     @Test
     public void testApply() throws Exception {
@@ -34,14 +34,12 @@ public class WriteModificationTest extends AbstractModificationTest {
 
     @Test
     public void testSerialization() {
-        YangInstanceIdentifier path = TestModel.TEST_PATH;
-        NormalizedNode data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        WriteModification expected = new WriteModification(TestModel.TEST_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
 
-        WriteModification expected = new WriteModification(path, data);
-
-        WriteModification clone = (WriteModification) SerializationUtils.clone(expected);
+        WriteModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
         assertEquals("getData", expected.getData(), clone.getData());
     }
index 8453368cb18e323da401188a0569721ca911f19c..62ec2d0c8a0b30116228d74fd07961d26ca1d54b 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class AbortTransactionPayloadTest extends AbstractIdentifiablePayloadTest<AbortTransactionPayload> {
-
-    @Override
-    AbortTransactionPayload object() {
-        return AbortTransactionPayload.create(nextTransactionId(), 512);
+    public AbortTransactionPayloadTest() {
+        super(AbortTransactionPayload.create(newTransactionId(0), 512), 125);
     }
 }
index a04c16919876075fb2d2c6b8e77b907a8518fcd6..5b82a478a74973dc1c2786302586af5296f5ae39 100644 (file)
@@ -7,19 +7,27 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
 import org.apache.commons.lang3.SerializationUtils;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 
-public abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+    private final T object;
+    private final int expectedSize;
 
-    abstract T object();
+    AbstractIdentifiablePayloadTest(final T object, final int expectedSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = expectedSize;
+    }
 
     @Test
     public void testSerialization() {
-        final T object = object();
-        final T cloned = SerializationUtils.clone(object);
-        Assert.assertEquals(object.getIdentifier(), cloned.getIdentifier());
+        final byte[] bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+        final T cloned = SerializationUtils.deserialize(bytes);
+        assertEquals(object.getIdentifier(), cloned.getIdentifier());
     }
 }
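
The reworked AbstractIdentifiablePayloadTest serializes the payload under test with commons-lang3, asserts the encoded length, and compares identifiers after deserialization. A minimal sketch of that round-trip, using a hypothetical SamplePayload record rather than any of the controller's payload classes:

import java.io.Serializable;
import org.apache.commons.lang3.SerializationUtils;

public final class SerializationRoundTripSketch {
    // Hypothetical stand-in for an AbstractIdentifiablePayload subclass
    private record SamplePayload(String identifier) implements Serializable { }

    public static void main(final String[] args) {
        final SamplePayload object = new SamplePayload("member-1-history-0");

        // serialize() exposes the raw bytes, so the expected encoded size can be asserted
        final byte[] bytes = SerializationUtils.serialize(object);
        System.out.println("serialized size: " + bytes.length);

        // deserialize() is generic, so no cast is needed on the way back
        final SamplePayload cloned = SerializationUtils.deserialize(bytes);
        System.out.println("identifiers equal: " + object.identifier().equals(cloned.identifier()));
    }
}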
index eeed0612b7f3bbc4314f386b14e4380a0c281cf6..071914bfc90fda3150d447892a7dbb6261480218 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class CloseLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CloseLocalHistoryPayload> {
-
-    @Override
-    CloseLocalHistoryPayload object() {
-        return CloseLocalHistoryPayload.create(nextHistoryId(), 512);
+    public CloseLocalHistoryPayloadTest() {
+        super(CloseLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index 6b2156ec77030a8ad0ad568b8696c8f397ba9b15..215c47d4f5766bde17dbf81029038d887b5aaf23 100644 (file)
@@ -18,25 +18,22 @@ import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.CandidateTransaction;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 
 public class CommitTransactionPayloadTest extends AbstractTest {
     static final QName LEAF_SET = QName.create(TestModel.TEST_QNAME, "leaf-set");
@@ -46,7 +43,7 @@ public class CommitTransactionPayloadTest extends AbstractTest {
     private static DataTreeCandidateNode findNode(final Collection<DataTreeCandidateNode> nodes,
             final PathArgument arg) {
         for (DataTreeCandidateNode node : nodes) {
-            if (arg.equals(node.getIdentifier())) {
+            if (arg.equals(node.name())) {
                 return node;
             }
         }
@@ -57,42 +54,41 @@ public class CommitTransactionPayloadTest extends AbstractTest {
             final Collection<DataTreeCandidateNode> actual) {
         // Make sure all expected nodes are there
         for (DataTreeCandidateNode exp : expected) {
-            final DataTreeCandidateNode act = findNode(actual, exp.getIdentifier());
+            final DataTreeCandidateNode act = findNode(actual, exp.name());
             assertNotNull("missing expected child", act);
             assertCandidateNodeEquals(exp, act);
         }
         // Make sure no nodes are present which are not in the expected set
         for (DataTreeCandidateNode act : actual) {
-            final DataTreeCandidateNode exp = findNode(expected, act.getIdentifier());
+            final DataTreeCandidateNode exp = findNode(expected, act.name());
             assertNull("unexpected child", exp);
         }
     }
 
-    private static void assertCandidateEquals(final DataTreeCandidate expected,
-            final DataTreeCandidateWithVersion actual) {
-        final DataTreeCandidate candidate = actual.getCandidate();
+    private static void assertCandidateEquals(final DataTreeCandidate expected, final CandidateTransaction actual) {
+        final var candidate = actual.candidate();
         assertEquals("root path", expected.getRootPath(), candidate.getRootPath());
         assertCandidateNodeEquals(expected.getRootNode(), candidate.getRootNode());
     }
 
     private static void assertCandidateNodeEquals(final DataTreeCandidateNode expected,
             final DataTreeCandidateNode actual) {
-        assertEquals("child type", expected.getModificationType(), actual.getModificationType());
+        assertEquals("child type", expected.modificationType(), actual.modificationType());
 
-        switch (actual.getModificationType()) {
+        switch (actual.modificationType()) {
             case DELETE:
             case WRITE:
-                assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
-                assertEquals("child data", expected.getDataAfter(), actual.getDataAfter());
+                assertEquals("child identifier", expected.name(), actual.name());
+                assertEquals("child data", expected.dataAfter(), actual.dataAfter());
                 break;
             case SUBTREE_MODIFIED:
-                assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
-                assertChildrenEquals(expected.getChildNodes(), actual.getChildNodes());
+                assertEquals("child identifier", expected.name(), actual.name());
+                assertChildrenEquals(expected.childNodes(), actual.childNodes());
                 break;
             case UNMODIFIED:
                 break;
             default:
-                fail("Unexpect root type " + actual.getModificationType());
+                fail("Unexpect root type " + actual.modificationType());
                 break;
         }
     }
@@ -100,88 +96,75 @@ public class CommitTransactionPayloadTest extends AbstractTest {
     @Before
     public void setUp() {
         setUpStatic();
-        final YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        final NormalizedNode writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-        candidate = DataTreeCandidates.fromNormalizedNode(writePath, writeData);
+        candidate = DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
     }
 
     @Test
     public void testCandidateSerialization() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
         assertEquals("payload size", 156, payload.size());
+        assertEquals("serialized size", 242, SerializationUtils.serialize(payload).length);
     }
 
     @Test
     public void testCandidateSerDes() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
     public void testPayloadSerDes() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate().getValue());
+        assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testLeafSetEntryNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
+        NodeWithValue<String> entryPathArg = new NodeWithValue<>(LEAF_SET, "one");
         YangInstanceIdentifier leafSetEntryPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET)
                 .node(entryPathArg).build();
 
-        NormalizedNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, leafSetEntryNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, ImmutableNodes.leafSetEntry(entryPathArg));
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testLeafSetNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
         YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
 
-        LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-        NormalizedNode leafSetNode = Builders.leafSetBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+            .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+            .build());
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testOrderedLeafSetNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
         YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
 
-        LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-        NormalizedNode leafSetNode = Builders.orderedLeafSetBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newUserLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+            .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+            .build());
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
     public void testLeafNodeCandidate() throws Exception {
         YangInstanceIdentifier leafPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH)
                 .node(TestModel.DESC_QNAME).build();
-        LeafNode<Object> leafNode = Builders.leafBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
 
-        candidate = DataTreeCandidates.fromNormalizedNode(leafPath, leafNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafPath,
+            ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test"));
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
@@ -194,6 +177,6 @@ public class CommitTransactionPayloadTest extends AbstractTest {
         candidate = dataTree.prepare(modification);
 
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 }
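
CommitTransactionPayloadTest now builds its normalized nodes through org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes and wraps them into candidates with DataTreeCandidates. A short sketch of that construction, assuming the TestModel constants from the surrounding test module; the rest is the yangtools API visible in the hunks above:

import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;

final class CandidateSketch {
    static DataTreeCandidate buildCandidate() {
        final ContainerNode data = ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
            .build();

        // The resulting candidate's root node is then inspected via name()/modificationType()/dataAfter()
        return DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, data);
    }
}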
index e0aef362e94ab1ee6e37507918dc3b341192c3aa..83941812294831d9f9d0b3640d2b45149a790b12 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class CreateLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CreateLocalHistoryPayload> {
-
-    @Override
-    CreateLocalHistoryPayload object() {
-        return CreateLocalHistoryPayload.create(nextHistoryId(), 512);
+    public CreateLocalHistoryPayloadTest() {
+        super(CreateLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index d1873f0f4b51d50eb441b580f43b74a063f0b91f..556772456c284c582c96ae7d44c5766466ada6ca 100644 (file)
@@ -42,21 +42,21 @@ public class FrontendShardDataTreeSnapshotMetadataTest {
     @Test
     public void testCreateMetadataSnapshotEmptyInput() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata emptyOrigSnapshot = createEmptyMetadataSnapshot();
-        final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot, 127);
+        final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot, 86);
         testMetadataSnapshotEqual(emptyOrigSnapshot, emptyCopySnapshot);
     }
 
     @Test
     public void testSerializeMetadataSnapshotWithOneClient() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(1);
-        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 162);
+        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 121);
         testMetadataSnapshotEqual(origSnapshot, copySnapshot);
     }
 
     @Test
     public void testSerializeMetadataSnapshotWithMoreClients() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(5);
-        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 314);
+        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 273);
         testMetadataSnapshotEqual(origSnapshot, copySnapshot);
     }
 
@@ -70,15 +70,15 @@ public class FrontendShardDataTreeSnapshotMetadataTest {
 
         final Map<ClientIdentifier, FrontendClientMetadata> origIdent = new HashMap<>();
         final Map<ClientIdentifier, FrontendClientMetadata> copyIdent = new HashMap<>();
-        origClientList.forEach(client -> origIdent.put(client.getIdentifier(), client));
-        origClientList.forEach(client -> copyIdent.put(client.getIdentifier(), client));
+        origClientList.forEach(client -> origIdent.put(client.clientId(), client));
+        origClientList.forEach(client -> copyIdent.put(client.clientId(), client));
 
         assertTrue(origIdent.keySet().containsAll(copyIdent.keySet()));
         assertTrue(copyIdent.keySet().containsAll(origIdent.keySet()));
 
         origIdent.values().forEach(client -> {
-            final FrontendClientMetadata copyClient = copyIdent.get(client.getIdentifier());
-            testObject(client.getIdentifier(), copyClient.getIdentifier());
+            final var copyClient = copyIdent.get(client.clientId());
+            testObject(client.clientId(), copyClient.clientId());
             assertEquals(client.getPurgedHistories(), copyClient.getPurgedHistories());
             assertEquals(client.getCurrentHistories(), copyClient.getCurrentHistories());
         });
index 3a3ded1af7e049218f2cc7a4064de2d57f5f0f93..cca22204133d02814328781e2cd8617a6e82b078 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class PurgeLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<PurgeLocalHistoryPayload> {
-
-    @Override
-    PurgeLocalHistoryPayload object() {
-        return PurgeLocalHistoryPayload.create(nextHistoryId(), 512);
+    public PurgeLocalHistoryPayloadTest() {
+        super(PurgeLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index cf59654467d8629dd9236fb845744cd25900caae..cfae341829575c5bc9da1475ab1b714c7db9702a 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class PurgeTransactionPayloadTest extends AbstractIdentifiablePayloadTest<PurgeTransactionPayload> {
-
-    @Override
-    PurgeTransactionPayload object() {
-        return PurgeTransactionPayload.create(nextTransactionId(), 512);
+    public PurgeTransactionPayloadTest() {
+        super(PurgeTransactionPayload.create(newTransactionId(0), 512), 125);
     }
 }
index 3051194a6f019c0ceb494713ff6c24062570f406..9d172e653a69f6e7adeac56bf1b50a06f1f5cdb2 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.ImmutableMap;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.Externalizable;
@@ -23,10 +21,9 @@ import java.util.Map;
 import java.util.Optional;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 /**
  * Unit tests for ShardDataTreeSnapshot.
@@ -34,11 +31,10 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableCo
  * @author Thomas Pantelis
  */
 public class ShardDataTreeSnapshotTest {
-
     @Test
     public void testShardDataTreeSnapshotWithNoMetadata() throws Exception {
-        NormalizedNode expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+        ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
                 .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode);
@@ -48,28 +44,26 @@ public class ShardDataTreeSnapshotTest {
         }
 
         final byte[] bytes = bos.toByteArray();
-        assertEquals(236, bytes.length);
+        assertEquals(202, bytes.length);
 
         ShardDataTreeSnapshot deserialized;
         try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
             deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
         }
 
-        Optional<NormalizedNode> actualNode = deserialized.getRootNode();
-        assertTrue("rootNode present", actualNode.isPresent());
-        assertEquals("rootNode", expectedNode, actualNode.get());
+        assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
         assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
         assertEquals("Metadata size", 0, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata().size());
     }
 
     @Test
     public void testShardDataTreeSnapshotWithMetadata() throws Exception {
-        NormalizedNode expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+        ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
                 .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> expMetadata =
-                ImmutableMap.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
+                Map.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
         MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode, expMetadata);
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
         try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
@@ -77,22 +71,21 @@ public class ShardDataTreeSnapshotTest {
         }
 
         final byte[] bytes = bos.toByteArray();
-        assertEquals(384, bytes.length);
+        assertEquals(350, bytes.length);
 
         ShardDataTreeSnapshot deserialized;
         try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
             deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
         }
 
-        Optional<NormalizedNode> actualNode = deserialized.getRootNode();
-        assertTrue("rootNode present", actualNode.isPresent());
-        assertEquals("rootNode", expectedNode, actualNode.get());
+        assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
         assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
         assertEquals("Metadata", expMetadata, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata());
     }
 
     static class TestShardDataTreeSnapshotMetadata
             extends ShardDataTreeSnapshotMetadata<TestShardDataTreeSnapshotMetadata> {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private final String data;
@@ -118,11 +111,13 @@ public class ShardDataTreeSnapshotTest {
 
         @Override
         public boolean equals(final Object obj) {
-            return obj instanceof TestShardDataTreeSnapshotMetadata
-                    && data.equals(((TestShardDataTreeSnapshotMetadata)obj).data);
+            return obj instanceof TestShardDataTreeSnapshotMetadata other && data.equals(other.data);
         }
 
         private static class Proxy implements Externalizable {
+            @java.io.Serial
+            private static final long serialVersionUID = 7534948936595056176L;
+
             private String data;
 
             @SuppressWarnings("checkstyle:RedundantModifier")
index 998a85f8dab858b6f6117e920196c5bae7eb4a1c..c1c09afa10b836b4473a797bf2ee5ae517b010e0 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static org.junit.Assert.assertEquals;
 
 import java.util.Arrays;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -24,7 +24,7 @@ public class ShardManagerSnapshotTest {
     public void testSerialization() {
         ShardManagerSnapshot expected =
                 new ShardManagerSnapshot(Arrays.asList("shard1", "shard2"));
-        ShardManagerSnapshot cloned = (ShardManagerSnapshot) SerializationUtils.clone(expected);
+        ShardManagerSnapshot cloned = SerializationUtils.clone(expected);
 
         assertEquals("getShardList", expected.getShardList(), cloned.getShardList());
     }
index 4b324ff29b53193a13e83ed36e4219a650b6642e..5a3019cd9f4f0acc6885e4c933bf433f79f8817e 100644 (file)
@@ -10,13 +10,14 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
-import org.apache.commons.lang.SerializationUtils;
+import java.util.Optional;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for ShardSnapshotState.
@@ -24,19 +25,19 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableCo
  * @author Thomas Pantelis
  */
 public class ShardSnapshotStateTest {
-
     @Test
     public void testSerialization() {
-        NormalizedNode expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode expectedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         ShardSnapshotState expected = new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expectedNode));
-        ShardSnapshotState cloned = (ShardSnapshotState) SerializationUtils.clone(expected);
+        ShardSnapshotState cloned = SerializationUtils.clone(expected);
 
         assertNotNull("getSnapshot is null", cloned.getSnapshot());
         assertEquals("getSnapshot type", MetadataShardDataTreeSnapshot.class, cloned.getSnapshot().getClass());
-        assertEquals("getRootNode", expectedNode,
-                ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode().get());
+        assertEquals("getRootNode", Optional.of(expectedNode),
+                ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode());
     }
 }
index 44012d81d635bbf03c2075abb8de46ee82ee4a57..818c18908dd2863f1d7eb5c79720e7fda4e526e5 100644 (file)
@@ -10,8 +10,7 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 
 public class SkipTransactionsPayloadTest extends AbstractIdentifiablePayloadTest<SkipTransactionsPayload> {
-    @Override
-    SkipTransactionsPayload object() {
-        return SkipTransactionsPayload.create(nextHistoryId(), MutableUnsignedLongSet.of(42).immutableCopy(), 512);
+    public SkipTransactionsPayloadTest() {
+        super(SkipTransactionsPayload.create(newHistoryId(0), MutableUnsignedLongSet.of(42).immutableCopy(), 512), 131);
     }
 }
index c8c3f6670bc64c5ea54201575c70c08f062385f5..889f1d47e1f8c2c55437d28180e73b684d1629f1 100644 (file)
@@ -73,12 +73,12 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.AbstractClusterRefActorTest;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapperImpl;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
@@ -134,8 +134,9 @@ import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.XMLNamespace;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -153,7 +154,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
     private static int ID_COUNTER = 1;
     private static ActorRef mockShardActor;
     private static ShardIdentifier mockShardName;
-    private static SettableFuture<Void> ready;
+    private static SettableFuture<Empty> ready;
     private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
 
     private final String shardMrgIDSuffix = "config" + ID_COUNTER++;
@@ -202,12 +203,12 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
     }
 
     private TestShardManager.Builder newTestShardMgrBuilder() {
-        return TestShardManager.builder(datastoreContextBuilder).distributedDataStore(mock(DistributedDataStore.class));
+        return TestShardManager.builder(datastoreContextBuilder)
+            .distributedDataStore(mock(ClientBackedDataStore.class));
     }
 
     private TestShardManager.Builder newTestShardMgrBuilder(final Configuration config) {
-        return TestShardManager.builder(datastoreContextBuilder).configuration(config)
-                .distributedDataStore(mock(DistributedDataStore.class));
+        return newTestShardMgrBuilder().configuration(config);
     }
 
     private Props newShardMgrProps() {
@@ -243,8 +244,9 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
     }
 
     private TestShardManager.Builder newTestShardMgrBuilderWithMockShardActor(final ActorRef shardActor) {
-        return TestShardManager.builder(datastoreContextBuilder).shardActor(shardActor)
-                .distributedDataStore(mock(DistributedDataStore.class));
+        return TestShardManager.builder(datastoreContextBuilder)
+            .shardActor(shardActor)
+            .distributedDataStore(mock(ClientBackedDataStore.class));
     }
 
 
@@ -422,7 +424,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         DataTree mockDataTree = mock(DataTree.class);
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mockDataTree,
@@ -451,7 +453,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -475,7 +477,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         MockClusterWrapper.sendMemberUp(shardManager, "member-2", kit.getRef().path().toString());
@@ -513,7 +515,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), kit.getRef());
 
@@ -527,7 +529,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
         shardManager.tell(
@@ -570,7 +572,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
 
         kit.expectNoMessage(Duration.ofMillis(150));
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         kit.expectNoMessage(Duration.ofMillis(150));
 
@@ -607,7 +609,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
 
         kit.expectMsgClass(Duration.ofSeconds(2), NotInitializedException.class);
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         kit.expectNoMessage(Duration.ofMillis(200));
 
@@ -621,7 +623,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
             RaftState.Candidate.name()), mockShardActor);
 
@@ -639,7 +641,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
             RaftState.IsolatedLeader.name()), mockShardActor);
 
@@ -657,7 +659,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), kit.getRef());
 
@@ -702,7 +704,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix;
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
@@ -770,8 +772,8 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final TestKit kit = new TestKit(system1);
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager1.tell(new ActorInitialized(), mockShardActor1);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -876,8 +878,8 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final TestKit kit = new TestKit(system1);
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager1.tell(new ActorInitialized(), mockShardActor1);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -972,8 +974,8 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final TestKit kit256 = new TestKit(system256);
         shardManager256.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
-        shardManager256.tell(new ActorInitialized(), mockShardActor256);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager256.tell(new ActorInitialized(mockShardActor256), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId256 = "member-256-shard-default-" + shardMrgIDSuffix;
         String memberId2   = "member-2-shard-default-"   + shardMrgIDSuffix;
@@ -1047,7 +1049,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), kit.getRef());
 
@@ -1081,7 +1083,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
             new Timeout(5, TimeUnit.SECONDS));
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         Object resp = Await.result(future, kit.duration("5 seconds"));
         assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
@@ -1253,7 +1255,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new SwitchShardBehavior(mockShardName, RaftState.Leader, 1000), kit.getRef());
 
@@ -1529,7 +1531,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+        leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
 
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
         leaderShardManager.tell(
@@ -1580,7 +1582,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
                 .createTestActor(newPropsShardMgrWithMockShardActor(), shardMgrID);
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String leaderId = "leader-member-shard-default-" + shardMrgIDSuffix;
         AddServerReply addServerReply = new AddServerReply(ServerChangeStatus.ALREADY_EXISTS, null);
@@ -1638,7 +1640,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -1753,7 +1755,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -1825,8 +1827,8 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
-        newReplicaShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+        leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
+        newReplicaShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
 
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
         leaderShardManager.tell(
@@ -1947,7 +1949,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         shardManager.underlyingActor().waitForRecoveryComplete();
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), shard);
+        shardManager.tell(new ActorInitialized(shard), ActorRef.noSender());
 
         waitForShardInitialized(shardManager, "people", kit);
         waitForShardInitialized(shardManager, "default", kit);
@@ -2016,8 +2018,8 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
             .addShardActor("shard1", shard1).addShardActor("shard2", shard2).props());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), shard1);
-        shardManager.tell(new ActorInitialized(), shard2);
+        shardManager.tell(new ActorInitialized(shard1), ActorRef.noSender());
+        shardManager.tell(new ActorInitialized(shard2), ActorRef.noSender());
 
         FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
         Future<Boolean> stopFuture = Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE);
@@ -2053,7 +2055,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -2085,7 +2087,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification(memberId, null, RaftState.Follower.name()), respondActor);
 
         shardManager.tell(
@@ -2108,7 +2110,7 @@ public class ShardManagerTest extends AbstractClusterRefActorTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         final Consumer<String> mockCallback = mock(Consumer.class);
         shardManager.tell(new RegisterForShardAvailabilityChanges(mockCallback), kit.getRef());
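
Across ShardManagerTest the ActorInitialized message now carries the initialized actor itself and is delivered with ActorRef.noSender() instead of relying on the implicit sender argument. A minimal sketch of that messaging shape with plain Akka classic actors; InitializedMsg and ManagerActor are hypothetical stand-ins, not the controller's classes:

import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;

public final class ActorInitializedSketch {
    // Hypothetical stand-in for the controller's ActorInitialized message
    record InitializedMsg(ActorRef actor) { }

    static final class ManagerActor extends AbstractActor {
        @Override
        public Receive createReceive() {
            return receiveBuilder()
                .match(InitializedMsg.class,
                    msg -> System.out.println("initialized: " + msg.actor().path()))
                .build();
        }
    }

    public static void main(final String[] args) {
        final ActorSystem system = ActorSystem.create("sketch");
        final ActorRef manager = system.actorOf(Props.create(ManagerActor.class, ManagerActor::new), "manager");
        final ActorRef shard = system.actorOf(Props.create(ManagerActor.class, ManagerActor::new), "shard");

        // The initialized actor travels inside the message; no implicit sender is used
        manager.tell(new InitializedMsg(shard), ActorRef.noSender());

        system.terminate();
    }
}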
index a60597e2fb14c9fd4d23b2133702afb264d64753..daef2143a2df5c88b893b72701499457001a391a 100644 (file)
@@ -59,7 +59,7 @@ import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShard
 import org.opendaylight.controller.cluster.raft.utils.EchoActor;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -67,10 +67,10 @@ import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 public class ActorUtilsTest extends AbstractActorTest {
-
     static final Logger LOG = LoggerFactory.getLogger(ActorUtilsTest.class);
 
-    private static class TestMessage {
+    private static final class TestMessage {
+
     }
 
     private static final class MockShardManager extends UntypedAbstractActor {
@@ -85,8 +85,7 @@ public class ActorUtilsTest extends AbstractActorTest {
         }
 
         @Override public void onReceive(final Object message) {
-            if (message instanceof FindPrimary) {
-                FindPrimary fp = (FindPrimary)message;
+            if (message instanceof FindPrimary fp) {
                 Object resp = findPrimaryResponses.get(fp.getShardName());
                 if (resp == null) {
                     LOG.error("No expected FindPrimary response found for shard name {}", fp.getShardName());
@@ -122,8 +121,8 @@ public class ActorUtilsTest extends AbstractActorTest {
             final ActorRef actorRef;
 
             MockShardManagerCreator() {
-                this.found = false;
-                this.actorRef = null;
+                found = false;
+                actorRef = null;
             }
 
             MockShardManagerCreator(final boolean found, final ActorRef actorRef) {
@@ -149,9 +148,7 @@ public class ActorUtilsTest extends AbstractActorTest {
             ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
                 mock(ClusterWrapper.class), mock(Configuration.class));
 
-            Optional<ActorRef> out = actorUtils.findLocalShard("default");
-
-            assertEquals(shardActorRef, out.get());
+            assertEquals(Optional.of(shardActorRef), actorUtils.findLocalShard("default"));
 
             testKit.expectNoMessage();
             return null;
@@ -379,7 +376,7 @@ public class ActorUtilsTest extends AbstractActorTest {
 
         assertNotNull(actual);
         assertTrue("LocalShardDataTree present", actual.getLocalShardDataTree().isPresent());
-        assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().get());
+        assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().orElseThrow());
         assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
                 expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
         assertEquals("getPrimaryShardVersion", DataStoreVersions.CURRENT_VERSION, actual.getPrimaryShardVersion());
index 326313eb3b29fb7d4894db45785fa541672557e0..9ba38c2616ae5ce0cf22b80bcf54f6e7c937cf12 100644 (file)
@@ -15,7 +15,6 @@ import static org.junit.Assert.fail;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
@@ -28,7 +27,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.DistinctNodeContainer;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
@@ -46,14 +45,14 @@ public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
     public void reset(final int newExpChangeEventCount) {
         changeLatch = new CountDownLatch(newExpChangeEventCount);
-        this.expChangeEventCount = newExpChangeEventCount;
+        expChangeEventCount = newExpChangeEventCount;
         synchronized (changeList) {
             changeList.clear();
         }
     }
 
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
         if (changeLatch.getCount() > 0) {
             synchronized (changeList) {
                 changeList.addAll(changes);
@@ -89,27 +88,26 @@ public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
         for (int i = 0; i < expPaths.length; i++) {
             final DataTreeCandidate candidate = changeList.get(i);
-            final Optional<NormalizedNode> maybeDataAfter = candidate.getRootNode().getDataAfter();
-            if (!maybeDataAfter.isPresent()) {
+            final NormalizedNode dataAfter = candidate.getRootNode().dataAfter();
+            if (dataAfter == null) {
                 fail(String.format("Change %d does not contain data after. Actual: %s", i + 1,
-                        candidate.getRootNode()));
+                    candidate.getRootNode()));
             }
 
-            final NormalizedNode dataAfter = maybeDataAfter.get();
             final Optional<YangInstanceIdentifier> relativePath = expPaths[i].relativeTo(candidate.getRootPath());
             if (!relativePath.isPresent()) {
                 assertEquals(String.format("Change %d does not contain %s. Actual: %s", i + 1, expPaths[i],
-                        dataAfter), expPaths[i].getLastPathArgument(), dataAfter.getIdentifier());
+                        dataAfter), expPaths[i].getLastPathArgument(), dataAfter.name());
             } else {
                 NormalizedNode nextChild = dataAfter;
-                for (PathArgument pathArg: relativePath.get().getPathArguments()) {
+                for (PathArgument pathArg: relativePath.orElseThrow().getPathArguments()) {
                     boolean found = false;
                     if (nextChild instanceof DistinctNodeContainer) {
                         Optional<NormalizedNode> maybeChild = ((DistinctNodeContainer)nextChild)
                                 .findChildByArg(pathArg);
                         if (maybeChild.isPresent()) {
                             found = true;
-                            nextChild = maybeChild.get();
+                            nextChild = maybeChild.orElseThrow();
                         }
                     }
 
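The MockDataTreeChangeListener hunks track two API moves visible in the imports: onDataTreeChanged now receives a List rather than a Collection, and the candidate root node in the relocated yang.data.tree.api package exposes a nullable dataAfter(), so the Optional unwrap disappears. Below is a small sketch of a consumer written against that shape, using only the calls that appear in the diff; the class name is made up.

import java.util.List;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;

// Sketch of consuming candidates the way the updated listener does.
final class CandidateInspector {
    void inspect(final List<DataTreeCandidate> changes) {
        for (DataTreeCandidate candidate : changes) {
            // dataAfter() returns null, not an empty Optional, when there is no data after.
            final NormalizedNode dataAfter = candidate.getRootNode().dataAfter();
            if (dataAfter == null) {
                continue;
            }
            // name() replaces the former getIdentifier() accessor on NormalizedNode.
            System.out.println("changed: " + dataAfter.name());
        }
    }
}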
index 0844f3d4f3163f185f7be7f8a00f75632ace18dd..d8bbdcf71d05af6d4af0da4989da708db51b7263 100644 (file)
@@ -11,7 +11,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.FluentFuture;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
@@ -27,29 +26,33 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public class NormalizedNodeAggregatorTest {
 
     @Test
-    public void testAggregate() throws InterruptedException, ExecutionException,
-        DataValidationFailedException {
+    public void testAggregate() throws InterruptedException, ExecutionException, DataValidationFailedException {
         EffectiveModelContext schemaContext = SchemaContextHelper.full();
-        NormalizedNode expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        NormalizedNode expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
-        Optional<NormalizedNode> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.empty(),
+        NormalizedNode expectedNode1 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .build();
+        NormalizedNode expectedNode2 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+            .build();
+
+        Optional<NormalizedNode> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(),
                 ImmutableList.of(
                         Optional.<NormalizedNode>of(getRootNode(expectedNode1, schemaContext)),
                         Optional.<NormalizedNode>of(getRootNode(expectedNode2, schemaContext))),
                 schemaContext, LogicalDatastoreType.CONFIGURATION);
 
 
-        NormalizedNode normalizedNode = optional.get();
+        NormalizedNode normalizedNode = optional.orElseThrow();
 
         assertTrue("Expect value to be a Collection", normalizedNode.body() instanceof Collection);
 
@@ -79,7 +82,7 @@ public class NormalizedNodeAggregatorTest {
 
             DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
 
-            writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.getIdentifier().getNodeType()), moduleNode);
+            writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.name().getNodeType()), moduleNode);
 
             DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
 
@@ -89,18 +92,14 @@ public class NormalizedNodeAggregatorTest {
 
             DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
 
-            FluentFuture<Optional<NormalizedNode>> read = readTransaction.read(YangInstanceIdentifier.empty());
-
-            Optional<NormalizedNode> nodeOptional = read.get();
-
-            return nodeOptional.get();
+            return readTransaction.read(YangInstanceIdentifier.of()).get().orElseThrow();
         }
     }
 
     public static NormalizedNode findChildWithQName(final Collection<NormalizedNode> collection,
             final QName qname) {
         for (NormalizedNode node : collection) {
-            if (node.getIdentifier().getNodeType().equals(qname)) {
+            if (node.name().getNodeType().equals(qname)) {
                 return node;
             }
         }
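NormalizedNodeAggregatorTest moves from ImmutableNodes.containerNode(...) in yang.data.impl.schema to the builder in yang.data.spi.node, and from YangInstanceIdentifier.empty() to YangInstanceIdentifier.of(). A compact sketch of the new construction follows; the QName is a placeholder standing in for TestModel.TEST_QNAME, not the test model's real namespace.

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

// Sketch only; the QName is a placeholder.
final class EmptyContainerSketch {
    private static final QName TEST = QName.create("urn:example:test", "test");

    static ContainerNode emptyTestContainer() {
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(TEST))
            .build();
    }

    static YangInstanceIdentifier rootPath() {
        // of() with no arguments replaces the removed empty() factory.
        return YangInstanceIdentifier.of();
    }
}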
index 1f5f92c03ca0ae115b1768133adbaebe5c0c8dd2..4c5c06e3fadff4ebd955badd3545bed7874f4347 100644 (file)
@@ -41,21 +41,22 @@ import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelpe
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
@@ -140,19 +141,21 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).merge(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
     public void testMergeWithInvalidChildNodeNames() throws DataValidationFailedException {
-        ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
-                        ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
         DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
-        ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug")).build();
+        ContainerNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+                .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+                .build())
+            .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+            .build();
 
         YangInstanceIdentifier path = TestModel.TEST_PATH;
 
@@ -160,12 +163,12 @@ public class PruningDataTreeModificationTest {
 
         dataTree.commit(getCandidate());
 
-        ContainerNode prunedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode).build();
+        ContainerNode prunedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .build();
 
-        Optional<NormalizedNode> actual = dataTree.takeSnapshot().readNode(path);
-        assertTrue("After pruning present", actual.isPresent());
-        assertEquals("After pruning", prunedNode, actual.get());
+        assertEquals("After pruning", Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
     }
 
     @Test
@@ -178,7 +181,7 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).merge(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
@@ -201,13 +204,11 @@ public class PruningDataTreeModificationTest {
         localDataTree.validate(mod);
         localDataTree.commit(localDataTree.prepare(mod));
 
-        NormalizedNode normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
-        pruningDataTreeModification.write(YangInstanceIdentifier.empty(), normalizedNode);
+        NormalizedNode normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
+        pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
         dataTree.commit(getCandidate());
 
-        Optional<NormalizedNode> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty());
-        assertTrue("Root present", actual.isPresent());
-        assertEquals("Root node", normalizedNode, actual.get());
+        assertEquals(Optional.of(normalizedNode), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
     }
 
     @Test
@@ -215,17 +216,16 @@ public class PruningDataTreeModificationTest {
         final Shard mockShard = Mockito.mock(Shard.class);
 
         ShardDataTree shardDataTree = new ShardDataTree(mockShard, SCHEMA_CONTEXT, TreeType.CONFIGURATION);
-        NormalizedNode root = shardDataTree.readNode(YangInstanceIdentifier.empty()).get();
+        NormalizedNode root = shardDataTree.readNode(YangInstanceIdentifier.of()).orElseThrow();
 
-        NormalizedNode normalizedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(root.getIdentifier().getNodeType())).withChild(
-                        ImmutableNodes.containerNode(AUG_CONTAINER)).build();
-        pruningDataTreeModification.write(YangInstanceIdentifier.empty(), normalizedNode);
+        NormalizedNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(root.name().getNodeType()))
+            .withChild(ImmutableNodes.containerNode(AUG_CONTAINER))
+            .build();
+        pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
         dataTree.commit(getCandidate());
 
-        Optional<NormalizedNode> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty());
-        assertEquals("Root present", true, actual.isPresent());
-        assertEquals("Root node", root, actual.get());
+        assertEquals(Optional.of(root), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
 
     }
 
@@ -239,20 +239,22 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).write(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
     public void testWriteWithInvalidChildNodeNames() throws DataValidationFailedException {
-        ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
-                        ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
         DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
-        ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
-                .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+        ContainerNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+                .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+                .build())
+            .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+            .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+            .build();
 
         YangInstanceIdentifier path = TestModel.TEST_PATH;
 
@@ -260,13 +262,13 @@ public class PruningDataTreeModificationTest {
 
         dataTree.commit(getCandidate());
 
-        ContainerNode prunedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+        ContainerNode prunedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+            .build();
 
-        Optional<NormalizedNode> actual = dataTree.takeSnapshot().readNode(path);
-        assertTrue("After pruning present", actual.isPresent());
-        assertEquals("After pruning", prunedNode, actual.get());
+        assertEquals(Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
     }
 
     @Test
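The PruningDataTreeModificationTest hunks swap ImmutableContainerNodeBuilder.create() for Builders.containerBuilder(), read modificationType() on the candidate root, and assert on Optional values directly. The sketch below reproduces the nested-builder construction using only calls shown in the hunks, with placeholder QNames in place of TEST_QNAME and the augmentation QNames.

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;

// Sketch only; QNames are placeholders for the test model's TEST and augmentation nodes.
final class PrunedNodeSketch {
    private static final QName TEST = QName.create("urn:example:test", "test");
    private static final QName AUG_CONTAINER = QName.create("urn:example:aug", "aug-container");
    private static final QName AUG_INNER = QName.create("urn:example:aug", "aug-inner-container");
    private static final QName AUG_LEAF = QName.create("urn:example:aug", "aug-leaf");

    static ContainerNode nodeWithInvalidChildren() {
        // One fluent chain replaces the separate ImmutableContainerNodeBuilder.create() calls.
        return Builders.containerBuilder()
            .withNodeIdentifier(new NodeIdentifier(TEST))
            .withChild(Builders.containerBuilder()
                .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
                .withChild(ImmutableNodes.containerNode(AUG_INNER))
                .build())
            .withChild(ImmutableNodes.leafNode(AUG_LEAF, "aug"))
            .build();
    }
}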
index 0722fcdbae4f7c6bf4b7f378493a50edcf046d67..4b379461617eabc5c9cb18041be0579119fbb8bb 100644 (file)
@@ -10,15 +10,14 @@ package org.opendaylight.controller.md.cluster.datastore.model;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
 
 public final class CarsModel {
     public static final QName BASE_QNAME = QName.create(
@@ -33,38 +32,26 @@ public final class CarsModel {
     public static final YangInstanceIdentifier CAR_LIST_PATH = BASE_PATH.node(CAR_QNAME);
 
     private CarsModel() {
-
+        // Hidden on purpose
     }
 
     public static ContainerNode create() {
-
-        // Create a list builder
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> cars =
-            ImmutableMapNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(CAR_QNAME));
-
-        // Create an entry for the car altima
-        MapEntryNode altima =
-            ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
-                .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
-                .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf(1000)))
-                .build();
-
-        // Create an entry for the car accord
-        MapEntryNode honda =
-            ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
-                .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
-                .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf("2000")))
-                .build();
-
-        cars.withChild(altima);
-        cars.withChild(honda);
-
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .withChild(cars.build())
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(Builders.mapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CAR_QNAME))
+                // Create an entry for the car altima
+                .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
+                    .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
+                    .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf(1000)))
+                    .build())
+                // Create an entry for the car accord
+                .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
+                    .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
+                    .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf("2000")))
+                    .build())
+                .build())
             .build();
-
     }
 
     public static NormalizedNode createEmptyCarsList() {
@@ -72,13 +59,14 @@ public final class CarsModel {
     }
 
     public static ContainerNode newCarsNode(final MapNode carsList) {
-        return ImmutableContainerNodeBuilder.create().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(
-                BASE_QNAME)).withChild(carsList).build();
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(carsList)
+            .build();
     }
 
     public static MapNode newCarsMapNode(final MapEntryNode... carEntries) {
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> builder = ImmutableMapNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CAR_QNAME));
+        var builder = Builders.mapBuilder().withNodeIdentifier(new NodeIdentifier(CAR_QNAME));
         for (MapEntryNode e : carEntries) {
             builder.withChild(e);
         }
@@ -87,9 +75,7 @@ public final class CarsModel {
     }
 
     public static ContainerNode emptyContainer() {
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .build();
+        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
     }
 
     public static SystemMapNode newCarMapNode() {
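CarsModel.create() collapses the separate list and entry builders into one fluent chain built from Builders.containerBuilder(), Builders.mapBuilder() and ImmutableNodes.mapEntryBuilder(). A trimmed sketch of the same shape is shown next; the QNames are placeholders for the model's BASE_QNAME, CAR_QNAME, CAR_NAME_QNAME and CAR_PRICE_QNAME.

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.Uint64;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;

// Sketch only; QNames are placeholders.
final class CarsSketch {
    private static final QName CARS = QName.create("urn:example:cars", "cars");
    private static final QName CAR = QName.create("urn:example:cars", "car");
    private static final QName NAME = QName.create("urn:example:cars", "name");
    private static final QName PRICE = QName.create("urn:example:cars", "price");

    static ContainerNode create() {
        return Builders.containerBuilder()
            .withNodeIdentifier(new NodeIdentifier(CARS))
            .withChild(Builders.mapBuilder()
                .withNodeIdentifier(new NodeIdentifier(CAR))
                // One entry, built inline instead of in a local variable.
                .withChild(ImmutableNodes.mapEntryBuilder(CAR, NAME, "altima")
                    .withChild(ImmutableNodes.leafNode(NAME, "altima"))
                    .withChild(ImmutableNodes.leafNode(PRICE, Uint64.valueOf(1000)))
                    .build())
                .build())
            .build();
    }
}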
index 0e041e0af5d52c6b52161714d62f211621502a60..582c5f99761fa148b79d8d2207ae7b4d918aabee 100644 (file)
@@ -10,28 +10,14 @@ package org.opendaylight.controller.md.cluster.datastore.model;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
 
-import java.util.HashSet;
-import java.util.Set;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
@@ -99,14 +85,14 @@ public final class CompositeModel {
     private static final String SECOND_GRAND_CHILD_NAME = "second grand child";
 
     private static final MapEntryNode BAR_NODE = mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, TWO_ID)
-            .withChild(mapNodeBuilder(INNER_LIST_QNAME)
-                    .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
-                    .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
-                    .build())
-            .build();
+        .withChild(mapNodeBuilder(INNER_LIST_QNAME)
+            .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
+            .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
+            .build())
+        .build();
 
     private CompositeModel() {
-
+        // Hidden on purpose
     }
 
     public static SchemaContext createTestContext() {
@@ -115,75 +101,48 @@ public final class CompositeModel {
     }
 
     public static ContainerNode createTestContainer() {
-        final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "nike"))
-                .withValue("nike").build();
-        final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "puma"))
-                .withValue("puma").build();
-        final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create()
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(leafNode(DESC_QNAME, DESC))
+            .withChild(leafNode(AUG_QNAME, "First Test"))
+            .withChild(ImmutableNodes.<String>newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "shoe")))
-                .withChild(nike).withChild(puma).build();
-
-        final LeafSetEntryNode<Object> five = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "number"), 5))
-                .withValue(5).build();
-        final LeafSetEntryNode<Object> fifteen = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(new NodeWithValue<>(QName.create(TEST_QNAME, "number"), 15))
-                .withValue(15).build();
-        final LeafSetNode<Object> numbers = ImmutableLeafSetNodeBuilder.create()
+                .withChildValue("nike")
+                .withChildValue("puma")
+                .build())
+            .withChild(ImmutableNodes.<Integer>newSystemLeafSetBuilder()
                 .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
-                .withChild(five).withChild(fifteen).build();
-
-        Set<QName> childAugmentations = new HashSet<>();
-        childAugmentations.add(AUG_QNAME);
-        final AugmentationIdentifier augmentationIdentifier = new AugmentationIdentifier(childAugmentations);
-        final AugmentationNode augmentationNode = Builders.augmentationBuilder()
-                .withNodeIdentifier(augmentationIdentifier).withChild(ImmutableNodes.leafNode(AUG_QNAME, "First Test"))
-                .build();
-        return ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC)).withChild(augmentationNode).withChild(shoes)
-                .withChild(numbers).withChild(mapNodeBuilder(OUTER_LIST_QNAME)
-                        .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID)).withChild(BAR_NODE).build())
-                .build();
+                .withChildValue(5)
+                .withChildValue(15)
+                .build())
+            .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+                .withChild(BAR_NODE)
+                .build())
+            .build();
     }
 
     public static ContainerNode createFamily() {
-        final DataContainerNodeBuilder<NodeIdentifier, ContainerNode> familyContainerBuilder =
-            ImmutableContainerNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME));
-
-        final CollectionNodeBuilder<MapEntryNode, SystemMapNode> childrenBuilder = mapNodeBuilder(CHILDREN_QNAME);
-
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    FIRST_GRAND_CHILD_ID);
-        final DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode>
-            secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    SECOND_GRAND_CHILD_ID);
-
-        firstGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
-
-        secondGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME));
-
-        firstChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
-        secondChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
-        childrenBuilder.withChild(firstChildBuilder.build());
-        childrenBuilder.withChild(secondChildBuilder.build());
+        final var firstGrandChild =
+            mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+                .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
+                .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME))
+                .build();
 
-        return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+            .withChild(mapNodeBuilder(CHILDREN_QNAME)
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+                    .build())
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+                    .build())
+                .build())
+            .build();
     }
 }
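CompositeModel.createTestContainer() now builds its leaf-lists with the spi.node ImmutableNodes system leaf-set builder and withChildValue(), and the former AugmentationNode wrapper is replaced by attaching the augmented leaf directly to the container. A reduced sketch of the leaf-list part, again with placeholder QNames:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

// Sketch only; QNames are placeholders for TEST_QNAME and its "shoe" leaf-list.
final class LeafSetSketch {
    private static final QName TEST = QName.create("urn:example:test", "test");
    private static final QName SHOE = QName.create(TEST, "shoe");
    private static final QName DESC = QName.create(TEST, "desc");

    static ContainerNode withShoes() {
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(TEST))
            // withChildValue() replaces building each LeafSetEntryNode by hand.
            .withChild(ImmutableNodes.<String>newSystemLeafSetBuilder()
                .withNodeIdentifier(new NodeIdentifier(SHOE))
                .withChildValue("nike")
                .withChildValue("puma")
                .build())
            .withChild(ImmutableNodes.leafNode(DESC, "sketch"))
            .build();
    }
}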
index 2672943685d466a537f804bdddd9b34cc2dc8170..725faf56f1e5663eb66c9936738495e9683167d3 100644 (file)
@@ -7,15 +7,15 @@
  */
 package org.opendaylight.controller.md.cluster.datastore.model;
 
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
+
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public final class PeopleModel {
     public static final QName BASE_QNAME = QName.create(
@@ -30,59 +30,45 @@ public final class PeopleModel {
     public static final YangInstanceIdentifier PERSON_LIST_PATH = BASE_PATH.node(PERSON_QNAME);
 
     private PeopleModel() {
-
+        // Hidden on purpose
     }
 
     public static ContainerNode create() {
-
-        // Create a list builder
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> cars =
-            ImmutableMapNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(
-                    PERSON_QNAME));
-
-        // Create an entry for the person jack
-        MapEntryNode jack =
-            ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
-                .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
-                .build();
-
-        // Create an entry for the person jill
-        MapEntryNode jill =
-            ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
-                .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
-                .build();
-
-        cars.withChild(jack);
-        cars.withChild(jill);
-
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .withChild(cars.build())
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(ImmutableNodes.newSystemMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(PERSON_QNAME))
+                // Create an entry for the person jack
+                .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
+                    .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
+                    .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
+                    .build())
+                // Create an entry for the person jill
+                .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
+                    .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
+                    .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
+                    .build())
+                .build())
             .build();
-
     }
 
     public static ContainerNode emptyContainer() {
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .build();
+        return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
     }
 
     public static SystemMapNode newPersonMapNode() {
-        return ImmutableNodes.mapNodeBuilder(PERSON_QNAME).build();
+        return ImmutableNodes.newSystemMapBuilder().withNodeIdentifier(new NodeIdentifier(PERSON_QNAME)).build();
     }
 
     public static MapEntryNode newPersonEntry(final String name) {
-        return ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name)).build();
+        return mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
+            .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name))
+            .build();
     }
 
     public static YangInstanceIdentifier newPersonPath(final String name) {
         return YangInstanceIdentifier.builder(PERSON_LIST_PATH)
-                .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name).build();
+            .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name)
+            .build();
     }
 }
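PeopleModel follows the same pattern, switching to ImmutableNodes.newSystemMapBuilder() for the empty person map while keeping the keyed-path builder. A condensed sketch with placeholder QNames standing in for the people/person QNames and the person list path:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

// Sketch only; QNames and the list path are placeholders.
final class PeopleSketch {
    private static final QName PEOPLE = QName.create("urn:example:people", "people");
    private static final QName PERSON = QName.create("urn:example:people", "person");
    private static final QName PERSON_NAME = QName.create("urn:example:people", "name");
    private static final YangInstanceIdentifier PERSON_LIST =
        YangInstanceIdentifier.of(PEOPLE).node(PERSON);

    static SystemMapNode emptyPersonMap() {
        return ImmutableNodes.newSystemMapBuilder()
            .withNodeIdentifier(new NodeIdentifier(PERSON))
            .build();
    }

    static YangInstanceIdentifier personPath(final String name) {
        // The keyed-path builder is unchanged apart from formatting.
        return YangInstanceIdentifier.builder(PERSON_LIST)
            .nodeWithKey(PERSON, PERSON_NAME, name)
            .build();
    }
}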
index 5555b887d29e94205fccbae51f5095bc4f1759f5..31a928256162e10e91b0479752b2279425bbf532 100644 (file)
@@ -9,14 +9,13 @@ package org.opendaylight.controller.md.cluster.datastore.model;
 
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
@@ -65,7 +64,7 @@ public final class TestModel {
     }
 
     public static DataContainerChild outerNode(final int... ids) {
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+        var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
         for (int id: ids) {
             outer.addChild(ImmutableNodes.mapEntry(OUTER_LIST_QNAME, ID_QNAME, id));
         }
@@ -74,7 +73,7 @@ public final class TestModel {
     }
 
     public static DataContainerChild outerNode(final MapEntryNode... entries) {
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+        var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
         for (MapEntryNode e: entries) {
             outer.addChild(e);
         }
@@ -83,7 +82,7 @@ public final class TestModel {
     }
 
     public static DataContainerChild innerNode(final String... names) {
-        CollectionNodeBuilder<MapEntryNode, SystemMapNode> outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
+        var outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
         for (String name: names) {
             outer.addChild(ImmutableNodes.mapEntry(INNER_LIST_QNAME, NAME_QNAME, name));
         }
@@ -95,13 +94,15 @@ public final class TestModel {
         return ImmutableNodes.mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, id).addChild(inner).build();
     }
 
-    public static NormalizedNode testNodeWithOuter(final int... ids) {
+    public static ContainerNode testNodeWithOuter(final int... ids) {
         return testNodeWithOuter(outerNode(ids));
     }
 
-    public static NormalizedNode testNodeWithOuter(final DataContainerChild outer) {
-        return ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outer).build();
+    public static ContainerNode testNodeWithOuter(final DataContainerChild outer) {
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outer)
+            .build();
     }
 
     public static NodeIdentifierWithPredicates outerEntryKey(final int id) {
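TestModel narrows the helper return types from NormalizedNode to ContainerNode and uses 'var' to hide the verbose CollectionNodeBuilder type. A short sketch of the rewritten testNodeWithOuter helper, built only from calls present in the hunks, with placeholder QNames:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;

// Sketch only; QNames are placeholders for TEST_QNAME, OUTER_LIST_QNAME and ID_QNAME.
final class TestNodeSketch {
    private static final QName TEST = QName.create("urn:example:test", "test");
    private static final QName OUTER_LIST = QName.create(TEST, "outer-list");
    private static final QName ID = QName.create(TEST, "id");

    static ContainerNode testNodeWithOuter(final int... ids) {
        // 'var' avoids spelling out CollectionNodeBuilder<MapEntryNode, SystemMapNode>.
        var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST);
        for (int id : ids) {
            outer.addChild(ImmutableNodes.mapEntry(OUTER_LIST, ID, id));
        }
        return Builders.containerBuilder()
            .withNodeIdentifier(new NodeIdentifier(TEST))
            .withChild(outer.build())
            .build();
    }
}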
index a7b22c6e3314b42bdf9cec1c8617d8ea7af0b6cd..d534372d24d015a89234ea6c64dd516ae5cc0360 100644 (file)
@@ -1 +1 @@
-{"Entries":[{"Entry":[{"Node":[{"Path":"/"},{"ModificationType":"UNMODIFIED"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=1}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=2}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=3}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=4}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=5}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=6}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=7}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=8}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=9}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=10}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, 
body=11}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=12}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=13}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=14}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=15}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{identifier=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=16}]"}]}]}]}
\ No newline at end of file
+{"Entries":[{"Entry":[{"Node":[{"Path":"/"},{"ModificationType":"UNMODIFIED"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=1}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=2}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=3}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=4}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=5}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=6}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=7}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=8}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=9}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=10}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, 
body=11}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=12}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=13}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=14}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=15}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=16}]"}]}]}]}
\ No newline at end of file
index eebc201ca11f73acf5b62d45483d62f7a8287b48..7a0b83212f669b33b7092f0fead633064b065bb4 100644 (file)
@@ -73,4 +73,4 @@ Member1 {
       ]
     }
   }
-}
\ No newline at end of file
+}
index 42bffe572e89311f07e6375e760fdc0d4666c185..7e1cffa2ed65f2304eee5090783d379fe4427114 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
index 05f83368f6ec334e8c5ccc2bda7503c14f63bec8..3112c49d92d41b7823a0805952736af5bc86c30f 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.dummy.datastore;
 import akka.actor.Props;
 import akka.actor.UntypedAbstractActor;
 
-public class DummyShardManager extends UntypedAbstractActor {
+public final class DummyShardManager extends UntypedAbstractActor {
     public DummyShardManager(final Configuration configuration, final String memberName, final String[] shardNames,
             final String type) {
         new DummyShardsCreator(configuration, getContext(), memberName, shardNames, type).create();
index 3b65b1c8e633cc8681237f489ac1fba853468131..c98a44d8eb5f99d0c55f7c27bf8f63928f64089a 100644 (file)
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
 
     <dependencies>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>repackaged-akka</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
         </dependency>
-        <!-- SAL Dependencies -->
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-util</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-           <artifactId>mdsal-dom-api</artifactId>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-           <artifactId>mdsal-dom-spi</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-clustering-commons</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>concepts</artifactId>
         </dependency>
-        <!-- Yang tools-->
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-api</artifactId>
+            <artifactId>yang-common</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-model-api</artifactId>
+            <artifactId>yang-data-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-impl</artifactId>
+            <artifactId>yang-data-codec-binfmt</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-common</artifactId>
+            <artifactId>yang-model-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-common-util</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-common-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-spi</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
         </dependency>
         <dependency>
             <groupId>org.osgi</groupId>
-            <artifactId>osgi.cmpn</artifactId>
+            <artifactId>org.osgi.service.metatype.annotations</artifactId>
         </dependency>
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
         </dependency>
+
         <!-- Test Dependencies -->
         <dependency>
             <groupId>com.typesafe.akka</groupId>
             <version>1.0</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>util</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-impl</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>yang-test-util</artifactId>
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <scope>test</scope>
-        </dependency>
     </dependencies>
 
     <build>
index 605337111c15c512333792e9d69788ada2c16578..9de0152be0f736125625988e4758eab48461bee7 100644 (file)
@@ -25,7 +25,7 @@ import org.osgi.service.metatype.annotations.ObjectClassDefinition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Component(immediate = true, configurationPid = "org.opendaylight.controller.remoterpc")
+@Component(configurationPid = "org.opendaylight.controller.remoterpc")
 @Designate(ocd = OSGiRemoteOpsProvider.Config.class)
 public final class OSGiRemoteOpsProvider {
     @ObjectClassDefinition()
@@ -38,21 +38,13 @@ public final class OSGiRemoteOpsProvider {
 
     private static final Logger LOG = LoggerFactory.getLogger(OSGiRemoteOpsProvider.class);
 
-    @Reference
-    ActorSystemProvider actorSystemProvider = null;
-    @Reference
-    DOMRpcProviderService rpcProviderService = null;
-    @Reference
-    DOMRpcService rpcService = null;
-    @Reference
-    DOMActionProviderService actionProviderService = null;
-    @Reference
-    DOMActionService actionService = null;
-
     private ActorRef opsManager;
 
     @Activate
-    void activate(final Config config) {
+    public OSGiRemoteOpsProvider(@Reference final ActorSystemProvider actorSystemProvider,
+            @Reference final DOMRpcProviderService rpcProviderService, @Reference final DOMRpcService rpcService,
+            @Reference final DOMActionProviderService actionProviderService,
+            @Reference final DOMActionService actionService, final Config config) {
         LOG.info("Remote Operations service starting");
         final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
         final RemoteOpsProviderConfig opsConfig = RemoteOpsProviderConfig.newInstance(actorSystem.name(),
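
The hunk above replaces field-level @Reference injection and a separate @Activate method with constructor injection, so every dependency is a final field set before the component activates. A minimal sketch of that Declarative Services (1.4+) style; the component class and GreetingService interface are illustrative and not part of this change:

// Constructor injection with OSGi Declarative Services annotations (sketch only).
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Deactivate;
import org.osgi.service.component.annotations.Reference;

@Component
public final class ExampleOpsComponent {
    // Illustrative service contract, assumed to be provided by another bundle.
    public interface GreetingService {
        String greet(String name);
    }

    private final GreetingService greetings;

    @Activate
    public ExampleOpsComponent(@Reference final GreetingService greetings) {
        // References arrive as constructor arguments, so the field can be final and the
        // component is fully wired the moment the constructor returns.
        this.greetings = greetings;
        System.out.println(greetings.greet("remote-ops"));
    }

    @Deactivate
    void deactivate() {
        // Tear-down would go here; nothing to release in this sketch.
    }
}
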
index 4ac3867d67382fe00587378b4a37d1ed42c1d9d1..dcb930e8ca3482c12d471625e7e62e4eb379591c 100644 (file)
@@ -33,7 +33,6 @@ import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
 import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
 
 /**
@@ -69,11 +68,11 @@ final class OpsInvoker extends AbstractUntypedActor {
 
     @Override
     protected void handleReceive(final Object message) {
-        if (message instanceof ExecuteRpc) {
+        if (message instanceof ExecuteRpc executeRpc) {
             LOG.debug("Handling ExecuteOps Message");
-            execute((ExecuteRpc) message);
-        } else if (message instanceof ExecuteAction) {
-            execute((ExecuteAction) message);
+            execute(executeRpc);
+        } else if (message instanceof ExecuteAction executeAction) {
+            execute(executeAction);
         } else {
             unknownMessage(message);
         }
@@ -102,8 +101,8 @@ final class OpsInvoker extends AbstractUntypedActor {
 
             @Override
             Object response(final QName type, final DOMRpcResult result) {
-                final Collection<? extends RpcError> errors = result.getErrors();
-                return errors.isEmpty() ? new RpcResponse(result.getResult())
+                final Collection<? extends RpcError> errors = result.errors();
+                return errors.isEmpty() ? new RpcResponse(result.value())
                         // This is legacy (wrong) behavior, which ignores the fact that errors may be just warnings,
                         // discarding any output
                         : new Failure(new RpcErrorsException(String.format("Execution of rpc %s failed", type),
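
The OpsInvoker hunk above adopts Java 16+ pattern matching for instanceof and the errors()/value() accessors of DOMRpcResult. A self-contained sketch of the same dispatch idiom, with illustrative stand-in message types rather than ExecuteRpc/ExecuteAction:

// Pattern-matching dispatch (sketch only; the message records are illustrative).
public final class DispatchExample {
    record ExecuteFoo(String name) { }
    record ExecuteBar(String name) { }

    static String handle(final Object message) {
        if (message instanceof ExecuteFoo foo) {
            // The type test and the binding happen in one step; 'foo' is in scope only when the test passes.
            return "foo: " + foo.name();
        } else if (message instanceof ExecuteBar bar) {
            return "bar: " + bar.name();
        } else {
            return "unknown: " + message;
        }
    }

    public static void main(final String[] args) {
        System.out.println(handle(new ExecuteFoo("rpc")));
        System.out.println(handle(new ExecuteBar("action")));
    }
}
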
index 40e926804630ff8b5507c6005a655025f401c2f5..ee0adbc15166a5cbe33bdadf3f3d0e5d9710c5db 100644 (file)
@@ -22,7 +22,7 @@ import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
 import org.opendaylight.mdsal.dom.api.DOMActionService;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -36,7 +36,7 @@ public class OpsManager extends AbstractUntypedActor {
     private final DOMActionProviderService actionProvisionRegistry;
     private final DOMActionService actionService;
 
-    private ListenerRegistration<OpsListener> listenerReg;
+    private Registration listenerReg;
     private ActorRef opsInvoker;
     private ActorRef actionRegistry;
     private ActorRef rpcRegistry;
@@ -48,7 +48,7 @@ public class OpsManager extends AbstractUntypedActor {
         this.rpcProvisionRegistry = requireNonNull(rpcProvisionRegistry);
         this.rpcServices = requireNonNull(rpcServices);
         this.config = requireNonNull(config);
-        this.actionProvisionRegistry = requireNonNull(actionProviderService);
+        actionProvisionRegistry = requireNonNull(actionProviderService);
         this.actionService = requireNonNull(actionService);
     }
 
index a16bbd4eb5236413d036bccc3a6cbf6c922dfb34..4d11a5414ec871fbd480658e5be93b409da1821a 100644 (file)
@@ -12,7 +12,6 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.Address;
 import akka.actor.Props;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -22,19 +21,17 @@ import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.Messages.U
 import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.RemoteActionEndpoint;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
-import org.opendaylight.mdsal.dom.api.DOMActionImplementation;
 import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 
 /**
  * Actor handling registration of RPCs and Actions available on remote nodes with the local
  * {@link DOMRpcProviderService} and {@link DOMActionProviderService}.
  */
 final class OpsRegistrar extends AbstractUntypedActor {
-    private final Map<Address, ObjectRegistration<DOMRpcImplementation>> rpcRegs = new HashMap<>();
-    private final Map<Address, ObjectRegistration<DOMActionImplementation>> actionRegs = new HashMap<>();
+    private final Map<Address, Registration> rpcRegs = new HashMap<>();
+    private final Map<Address, Registration> actionRegs = new HashMap<>();
     private final DOMRpcProviderService rpcProviderService;
     private final RemoteOpsProviderConfig config;
     private final DOMActionProviderService actionProviderService;
@@ -55,9 +52,9 @@ final class OpsRegistrar extends AbstractUntypedActor {
 
     @Override
     public void postStop() throws Exception {
-        rpcRegs.values().forEach(ObjectRegistration::close);
+        rpcRegs.values().forEach(Registration::close);
         rpcRegs.clear();
-        actionRegs.values().forEach(ObjectRegistration::close);
+        actionRegs.values().forEach(Registration::close);
         actionRegs.clear();
 
         super.postStop();
@@ -65,12 +62,12 @@ final class OpsRegistrar extends AbstractUntypedActor {
 
     @Override
     protected void handleReceive(final Object message) {
-        if (message instanceof UpdateRemoteEndpoints) {
+        if (message instanceof UpdateRemoteEndpoints updateEndpoints) {
             LOG.debug("Handling updateRemoteEndpoints message");
-            updateRemoteRpcEndpoints(((UpdateRemoteEndpoints) message).getRpcEndpoints());
-        } else if (message instanceof UpdateRemoteActionEndpoints) {
+            updateRemoteRpcEndpoints(updateEndpoints.getRpcEndpoints());
+        } else if (message instanceof UpdateRemoteActionEndpoints updateEndpoints) {
             LOG.debug("Handling updateRemoteActionEndpoints message");
-            updateRemoteActionEndpoints(((UpdateRemoteActionEndpoints) message).getActionEndpoints());
+            updateRemoteActionEndpoints(updateEndpoints.getActionEndpoints());
         } else {
             unknownMessage(message);
         }
@@ -85,15 +82,15 @@ final class OpsRegistrar extends AbstractUntypedActor {
          * Note that when an RPC moves from one remote node to another, we also do not want to expose the gap,
          * hence we register all new implementations before closing all registrations.
          */
-        final Collection<ObjectRegistration<?>> prevRegs = new ArrayList<>(rpcEndpoints.size());
+        final var prevRegs = new ArrayList<Registration>(rpcEndpoints.size());
 
         for (Entry<Address, Optional<RemoteRpcEndpoint>> e : rpcEndpoints.entrySet()) {
             LOG.debug("Updating RPC registrations for {}", e.getKey());
 
-            final ObjectRegistration<DOMRpcImplementation> prevReg;
+            final Registration prevReg;
             final Optional<RemoteRpcEndpoint> maybeEndpoint = e.getValue();
             if (maybeEndpoint.isPresent()) {
-                final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
+                final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
                 final RemoteRpcImplementation impl = new RemoteRpcImplementation(endpoint.getRouter(), config);
                 prevReg = rpcRegs.put(e.getKey(), rpcProviderService.registerRpcImplementation(impl,
                     endpoint.getRpcs()));
@@ -106,7 +103,7 @@ final class OpsRegistrar extends AbstractUntypedActor {
             }
         }
 
-        prevRegs.forEach(ObjectRegistration::close);
+        prevRegs.forEach(Registration::close);
     }
 
     /**
@@ -121,15 +118,15 @@ final class OpsRegistrar extends AbstractUntypedActor {
          * Note that when an Action moves from one remote node to another, we also do not want to expose the gap,
          * hence we register all new implementations before closing all registrations.
          */
-        final Collection<ObjectRegistration<?>> prevRegs = new ArrayList<>(actionEndpoints.size());
+        final var prevRegs = new ArrayList<Registration>(actionEndpoints.size());
 
         for (Entry<Address, Optional<RemoteActionEndpoint>> e : actionEndpoints.entrySet()) {
             LOG.debug("Updating action registrations for {}", e.getKey());
 
-            final ObjectRegistration<DOMActionImplementation> prevReg;
+            final Registration prevReg;
             final Optional<RemoteActionEndpoint> maybeEndpoint = e.getValue();
             if (maybeEndpoint.isPresent()) {
-                final RemoteActionEndpoint endpoint = maybeEndpoint.get();
+                final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
                 final RemoteActionImplementation impl = new RemoteActionImplementation(endpoint.getRouter(), config);
                 prevReg = actionRegs.put(e.getKey(), actionProviderService.registerActionImplementation(impl,
                     endpoint.getActions()));
@@ -142,6 +139,6 @@ final class OpsRegistrar extends AbstractUntypedActor {
             }
         }
 
-        prevRegs.forEach(ObjectRegistration::close);
+        prevRegs.forEach(Registration::close);
     }
 }
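
The comments kept in the OpsRegistrar hunks above carry the key reasoning: new RPC/action implementations are registered before the previous registrations are closed, so consumers never observe a window with no implementation. A minimal sketch of that ordering, assuming an illustrative string-keyed map around yangtools' Registration:

// Register-new-then-close-old update strategy (sketch only).
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.opendaylight.yangtools.concepts.Registration;

final class RegistrationUpdater {
    private final Map<String, Registration> current = new HashMap<>();

    void update(final Map<String, Registration> replacements) {
        final List<Registration> previous = new ArrayList<>(replacements.size());
        // First install every new registration, remembering whatever it displaced ...
        replacements.forEach((key, reg) -> {
            final Registration prev = current.put(key, reg);
            if (prev != null) {
                previous.add(prev);
            }
        });
        // ... and only then close the displaced registrations, so there is no gap.
        previous.forEach(Registration::close);
    }
}
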
index 6b4ab55de4e52c91f3b252cabe7be20943bcb5c7..9d9e29ad36befc04617a39f804fdc003b27fa3eb 100644 (file)
@@ -23,8 +23,7 @@ final class RemoteDOMActionFuture extends AbstractRemoteFuture<Absolute, DOMActi
 
     @Override
     DOMActionResult processReply(final Object reply) {
-        if (reply instanceof ActionResponse) {
-            final ActionResponse actionReply = (ActionResponse) reply;
+        if (reply instanceof ActionResponse actionReply) {
             final ContainerNode output = actionReply.getOutput();
             return output == null ? new SimpleDOMActionResult(actionReply.getErrors())
                     : new SimpleDOMActionResult(output, actionReply.getErrors());
index ef1e635bcbb2ba05864ab4fa3a1b492dc97e9e30..e112d1f9818411067b072516a034b993f04bc237 100644 (file)
@@ -22,7 +22,7 @@ final class RemoteDOMRpcFuture extends AbstractRemoteFuture<QName, DOMRpcResult,
 
     @Override
     DOMRpcResult processReply(final Object reply) {
-        return reply instanceof RpcResponse ? new DefaultDOMRpcResult(((RpcResponse) reply).getOutput()) : null;
+        return reply instanceof RpcResponse response ? new DefaultDOMRpcResult(response.getOutput()) : null;
     }
 
     @Override
index 67da0963ee48df4e9190c5872f0f6f9bb54b5104..3046f7e6dde0284fefa3f6e5bbbc07d3e1a9a374 100644 (file)
@@ -13,7 +13,7 @@ import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * A {@link DOMRpcImplementation} which routes invocation requests to a remote invoker actor.
@@ -26,8 +26,7 @@ final class RemoteRpcImplementation extends AbstractRemoteImplementation<Execute
     }
 
     @Override
-    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc,
-            final NormalizedNode input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         return new RemoteDOMRpcFuture(rpc.getType(), ask(ExecuteRpc.from(rpc, input)));
     }
 
index 01e5da8f3a2582d41acdcc1f8404185ffffdcec3..d75805c3fe30cc8b1daf716b7ce9709b6c5437e0 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import java.io.Serializable;
@@ -13,9 +12,10 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import org.opendaylight.mdsal.dom.api.DOMRpcException;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
 /**
@@ -24,21 +24,22 @@ import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
  * @author Thomas Pantelis
  */
 public class RpcErrorsException extends DOMRpcException {
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private static class RpcErrorData implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final ErrorSeverity severity;
         final ErrorType errorType;
-        final String tag;
+        final ErrorTag tag;
         final String applicationTag;
         final String message;
         final String info;
         final Throwable cause;
 
-        RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final String tag,
+        RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final ErrorTag tag,
                 final String applicationTag, final String message, final String info, final Throwable cause) {
             this.severity = severity;
             this.errorType = errorType;
@@ -55,7 +56,7 @@ public class RpcErrorsException extends DOMRpcException {
     public RpcErrorsException(final String message, final Iterable<? extends RpcError> rpcErrors) {
         super(message);
 
-        for (final RpcError rpcError: rpcErrors) {
+        for (var rpcError : rpcErrors) {
             rpcErrorDataList.add(new RpcErrorData(rpcError.getSeverity(), rpcError.getErrorType(),
                     rpcError.getTag(), rpcError.getApplicationTag(), rpcError.getMessage(),
                     rpcError.getInfo(), rpcError.getCause()));
@@ -63,8 +64,8 @@ public class RpcErrorsException extends DOMRpcException {
     }
 
     public Collection<RpcError> getRpcErrors() {
-        final Collection<RpcError> rpcErrors = new ArrayList<>();
-        for (final RpcErrorData ed: rpcErrorDataList) {
+        final var rpcErrors = new ArrayList<RpcError>();
+        for (var ed : rpcErrorDataList) {
             final RpcError rpcError = ed.severity == ErrorSeverity.ERROR
                     ? RpcResultBuilder.newError(ed.errorType, ed.tag, ed.message, ed.applicationTag,
                             ed.info, ed.cause) :
index fb034300f9964442e3481d727943430a8a3fbe80..a79a4e45d421f2aad2f008e6a41d92ff115c1592 100644 (file)
@@ -22,11 +22,9 @@ public class TerminationMonitor extends UntypedAbstractActor {
 
     @Override
     public void onReceive(final Object message) {
-        if (message instanceof Terminated) {
-            Terminated terminated = (Terminated) message;
+        if (message instanceof Terminated terminated) {
             LOG.debug("Actor terminated : {}", terminated.actor());
-        } else if (message instanceof Monitor) {
-            Monitor monitor = (Monitor) message;
+        } else if (message instanceof Monitor monitor) {
             getContext().watch(monitor.getActorRef());
         }
     }
index cb03c220beeb95410ec66817b922907679e47a64..c462f7b5c0e141a3fca6ec64ae8ef49476ab81db 100644 (file)
@@ -22,7 +22,6 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
-import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier;
 import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
@@ -75,10 +74,10 @@ public final class ExecuteAction extends AbstractExecute<Absolute, @NonNull Cont
 
         @Override
         public void writeExternal(final ObjectOutput out) throws IOException {
-            try (NormalizedNodeDataOutput stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+            try (var stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
                 stream.writeSchemaNodeIdentifier(executeAction.getType());
-                executeAction.getPath().getDatastoreType().writeTo(out);
-                stream.writeYangInstanceIdentifier(executeAction.getPath().getRootIdentifier());
+                executeAction.getPath().datastore().writeTo(out);
+                stream.writeYangInstanceIdentifier(executeAction.getPath().path());
                 stream.writeOptionalNormalizedNode(executeAction.getInput());
             }
         }
@@ -87,7 +86,7 @@ public final class ExecuteAction extends AbstractExecute<Absolute, @NonNull Cont
         public void readExternal(final ObjectInput in) throws IOException {
             final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
             final SchemaNodeIdentifier sni = stream.readSchemaNodeIdentifier();
-            if (!(sni instanceof Absolute)) {
+            if (!(sni instanceof Absolute absolute)) {
                 throw new InvalidObjectException("Non-absolute type " + sni);
             }
 
@@ -95,7 +94,7 @@ public final class ExecuteAction extends AbstractExecute<Absolute, @NonNull Cont
             final YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
             final ContainerNode input = (ContainerNode) stream.readOptionalNormalizedNode().orElse(null);
 
-            executeAction = new ExecuteAction((Absolute) sni, new DOMDataTreeIdentifier(type, path), input);
+            executeAction = new ExecuteAction(absolute, DOMDataTreeIdentifier.of(type, path), input);
         }
 
         private Object readResolve() {
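
The ExecuteAction proxy above streams its payload through the yangtools binfmt codec. A minimal round-trip sketch of that codec; the QName and leaf value are made up for the example:

// Serialize a NormalizedNode with the current binfmt stream version and read it back (sketch only).
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

public final class BinfmtRoundTrip {
    public static void main(final String[] args) throws IOException {
        final QName leafName = QName.create("urn:example", "2024-01-01", "example-leaf");
        final NormalizedNode original = ImmutableNodes.leafNode(leafName, "hello");

        // Write the node using the current stream version ...
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (var stream = NormalizedNodeStreamVersion.current().newDataOutput(new DataOutputStream(bytes))) {
            stream.writeOptionalNormalizedNode(original);
        }

        // ... and read it back through the version-negotiating input.
        final var input = NormalizedNodeDataInput.newDataInput(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        final NormalizedNode copy = input.readOptionalNormalizedNode().orElseThrow();
        System.out.println(original.equals(copy));
    }
}
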
index 873c5b89015ef3f9b4dc8a8279b2a6a9eed3d4c6..d9f6a67b5f2ac7efcd53fab50d258ff8484b96ef 100644 (file)
@@ -17,20 +17,20 @@ import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 
-public final class ExecuteRpc extends AbstractExecute<QName, @Nullable NormalizedNode> {
+public final class ExecuteRpc extends AbstractExecute<QName, @Nullable ContainerNode> {
     private static final long serialVersionUID = 1128904894827335676L;
 
-    private ExecuteRpc(final @NonNull QName type, final @Nullable NormalizedNode input) {
+    private ExecuteRpc(final @NonNull QName type, final @Nullable ContainerNode input) {
         super(type, input);
     }
 
     public static @NonNull ExecuteRpc from(final @NonNull DOMRpcIdentifier rpc,
-            final @Nullable NormalizedNode input) {
+            final @Nullable ContainerNode input) {
         return new ExecuteRpc(rpc.getType(), input);
     }
 
@@ -67,7 +67,7 @@ public final class ExecuteRpc extends AbstractExecute<QName, @Nullable Normalize
         public void readExternal(final ObjectInput in) throws IOException {
             final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
             final QName type = stream.readQName();
-            final NormalizedNode input = stream.readOptionalNormalizedNode().orElse(null);
+            final ContainerNode input = RpcResponse.unmaskContainer(stream.readOptionalNormalizedNode());
             executeRpc = new ExecuteRpc(type, input);
         }
 
index 97a515514001f5f8e1aa079886d2c955b5adbb10..bb308203ddd789fcf783c7ab6e8e0abadd2ca63a 100644 (file)
@@ -9,16 +9,19 @@ package org.opendaylight.controller.remote.rpc.messages;
 
 import java.io.Externalizable;
 import java.io.IOException;
+import java.io.InvalidObjectException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
+import java.util.Optional;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-public class RpcResponse extends AbstractResponse<NormalizedNode> {
+public class RpcResponse extends AbstractResponse<ContainerNode> {
     private static final long serialVersionUID = -4211279498688989245L;
 
-    public RpcResponse(final @Nullable NormalizedNode output) {
+    public RpcResponse(final @Nullable ContainerNode output) {
         super(output);
     }
 
@@ -27,6 +30,18 @@ public class RpcResponse extends AbstractResponse<NormalizedNode> {
         return new Proxy(this);
     }
 
+    static @Nullable ContainerNode unmaskContainer(final Optional<NormalizedNode> optNode)
+            throws InvalidObjectException {
+        if (optNode.isEmpty()) {
+            return null;
+        }
+        final var node = optNode.orElseThrow();
+        if (node instanceof ContainerNode container) {
+            return container;
+        }
+        throw new InvalidObjectException("Unexpected data " + node.contract().getSimpleName());
+    }
+
     private static class Proxy implements Externalizable {
         private static final long serialVersionUID = 1L;
 
@@ -49,7 +64,7 @@ public class RpcResponse extends AbstractResponse<NormalizedNode> {
 
         @Override
         public void readExternal(final ObjectInput in) throws IOException {
-            rpcResponse = new RpcResponse(SerializationUtils.readNormalizedNode(in).orElse(null));
+            rpcResponse = new RpcResponse(unmaskContainer(SerializationUtils.readNormalizedNode(in)));
         }
 
         private Object readResolve() {
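
RpcResponse and ExecuteAction both funnel Java serialization through a small Externalizable proxy using writeReplace() and readResolve(). A minimal sketch of that proxy pattern, with an illustrative Payload class that is not part of this change:

// Serialization-proxy pattern (sketch only).
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;

public class Payload implements Serializable {
    private static final long serialVersionUID = 1L;

    private final String data;

    public Payload(final String data) {
        this.data = data;
    }

    // Serialize the proxy instead of this class.
    private Object writeReplace() {
        return new Proxy(this);
    }

    private static class Proxy implements Externalizable {
        private static final long serialVersionUID = 1L;

        private Payload payload;

        // Public no-argument constructor required by Externalizable.
        public Proxy() {
        }

        Proxy(final Payload payload) {
            this.payload = payload;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeUTF(payload.data);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            payload = new Payload(in.readUTF());
        }

        // Hand the reconstructed Payload back to the deserializer.
        private Object readResolve() {
            return payload;
        }
    }
}
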
index 96697391485b79f75ccadaa5e148e7d533c5552d..2a91b5ab614c84fb8c1c8c37771513fa82b8af51 100644 (file)
@@ -77,9 +77,9 @@ public class ActionRegistry extends BucketStoreActor<ActionRoutingTable> {
 
     @Override
     protected void handleCommand(final Object message) throws Exception {
-        if (message instanceof ActionRegistry.Messages.UpdateActions) {
+        if (message instanceof ActionRegistry.Messages.UpdateActions updateActions) {
             LOG.debug("handling updatesActionRoutes message");
-            updatesActionRoutes((Messages.UpdateActions) message);
+            updatesActionRoutes(updateActions);
         } else {
             super.handleCommand(message);
         }
@@ -149,11 +149,11 @@ public class ActionRegistry extends BucketStoreActor<ActionRoutingTable> {
             }
 
             Collection<DOMActionInstance> getAddedActions() {
-                return this.addedActions;
+                return addedActions;
             }
 
             Collection<DOMActionInstance> getRemovedActions() {
-                return this.removedActions;
+                return removedActions;
             }
 
 
index dfb05c67f1d6e15a96e23a19375c62085cec3f8d..5ca5a71545dd11df9f46b48227c5ea9d38a75684 100644 (file)
@@ -23,7 +23,6 @@ import java.util.Set;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMActionInstance;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
@@ -63,8 +62,7 @@ public final class ActionRoutingTable extends AbstractRoutingTable<ActionRouting
             for (DOMActionInstance id : actions) {
                 final Absolute type = id.getType();
                 nnout.writeSchemaNodeIdentifier(type);
-                nnout.writeYangInstanceIdentifier(YangInstanceIdentifier.create(new NodeIdentifier(
-                    type.lastNodeIdentifier())));
+                nnout.writeYangInstanceIdentifier(YangInstanceIdentifier.of(type.lastNodeIdentifier()));
             }
         }
 
@@ -78,11 +76,11 @@ public final class ActionRoutingTable extends AbstractRoutingTable<ActionRouting
             actions = new ArrayList<>(size);
             for (int i = 0; i < size; ++i) {
                 final SchemaNodeIdentifier sni = nnin.readSchemaNodeIdentifier();
-                if (!(sni instanceof Absolute)) {
+                if (!(sni instanceof Absolute absolute)) {
                     throw new InvalidObjectException("Non-absolute type " + sni);
                 }
 
-                actions.add(DOMActionInstance.of((Absolute) sni, LogicalDatastoreType.OPERATIONAL,
+                actions.add(DOMActionInstance.of(absolute, LogicalDatastoreType.OPERATIONAL,
                         nnin.readYangInstanceIdentifier()));
             }
         }
index 2c89f1426072a6e945de5ed107fd3b07a53ef7df..8d66ed8ccb163abc56891009bfe09cfdae29add6 100644 (file)
@@ -81,10 +81,10 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
 
     @Override
     protected void handleCommand(final Object message) throws Exception {
-        if (message instanceof AddOrUpdateRoutes) {
-            receiveAddRoutes((AddOrUpdateRoutes) message);
-        } else if (message instanceof RemoveRoutes) {
-            receiveRemoveRoutes((RemoveRoutes) message);
+        if (message instanceof AddOrUpdateRoutes addRoutes) {
+            receiveAddRoutes(addRoutes);
+        } else if (message instanceof RemoveRoutes removeRoutes) {
+            receiveRemoveRoutes(removeRoutes);
         } else {
             super.handleCommand(message);
         }
@@ -161,7 +161,7 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
             }
 
             List<DOMRpcIdentifier> getRouteIdentifiers() {
-                return this.rpcRouteIdentifiers;
+                return rpcRouteIdentifiers;
             }
 
             @Override
index e06e5fb15bfaf38cb7b522eb9f57efe3d6920c75..efbd63cd211ab4548252715530c313b484ef04d6 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
+import static java.util.Objects.requireNonNull;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getBucketsByMembersMessage;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getLocalDataMessage;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getRemoteBucketsMessage;
@@ -18,21 +19,16 @@ import akka.actor.Address;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.Collection;
 import java.util.Map;
-import java.util.Objects;
 import java.util.function.Consumer;
 import scala.concurrent.ExecutionContext;
 import scala.concurrent.Future;
 
 /**
  * Convenience access to {@link BucketStoreActor}. Used mostly by {@link Gossiper}.
- *
- * @author Robert Varga
  */
-@Beta
 @VisibleForTesting
 public final class BucketStoreAccess {
     private final ActorRef actorRef;
@@ -40,15 +36,15 @@ public final class BucketStoreAccess {
     private final Timeout timeout;
 
     public BucketStoreAccess(final ActorRef actorRef, final ExecutionContext dispatcher, final Timeout timeout) {
-        this.actorRef = Objects.requireNonNull(actorRef);
-        this.dispatcher = Objects.requireNonNull(dispatcher);
-        this.timeout = Objects.requireNonNull(timeout);
+        this.actorRef = requireNonNull(actorRef);
+        this.dispatcher = requireNonNull(dispatcher);
+        this.timeout = requireNonNull(timeout);
     }
 
     <T extends BucketData<T>> void getBucketsByMembers(final Collection<Address> members,
             final Consumer<Map<Address, Bucket<T>>> callback) {
         Patterns.ask(actorRef, getBucketsByMembersMessage(members), timeout)
-            .onComplete(new OnComplete<Object>() {
+            .onComplete(new OnComplete<>() {
                 @SuppressWarnings("unchecked")
                 @Override
                 public void onComplete(final Throwable failure, final Object success) {
@@ -60,7 +56,7 @@ public final class BucketStoreAccess {
     }
 
     void getBucketVersions(final Consumer<Map<Address, Long>> callback) {
-        Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<Object>() {
+        Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<>() {
             @SuppressWarnings("unchecked")
             @Override
             public void onComplete(final Throwable failure, final Object success) {
@@ -96,9 +92,13 @@ public final class BucketStoreAccess {
     }
 
     public enum Singletons {
-        // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Bucket<?>>
+        /**
+         * Sent from Gossiper to BucketStore, response is an immutable {@code Map&lt;Address, Bucket&lt;?&gt;&gt;}.
+         */
         GET_ALL_BUCKETS,
-        // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Long>
+        /**
+         * Sent from Gossiper to BucketStore, response is an immutable {@code Map&lt;Address, Long&gt;}.
+         */
         GET_BUCKET_VERSIONS,
     }
 }
index b494256d500f2fe53f2660c3216ce503a4b09d91..f155880c0185677e20e4187df79684858f8be934 100644 (file)
@@ -154,18 +154,17 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
             return;
         }
 
-        if (message instanceof ExecuteInActor) {
-            ((ExecuteInActor) message).accept(this);
+        if (message instanceof ExecuteInActor execute) {
+            execute.accept(this);
         } else if (GET_BUCKET_VERSIONS == message) {
             // FIXME: do we need to send ourselves?
             getSender().tell(ImmutableMap.copyOf(versions), getSelf());
-        } else if (message instanceof Terminated) {
-            actorTerminated((Terminated) message);
-        } else if (message instanceof DeleteSnapshotsSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), message);
-        } else if (message instanceof DeleteSnapshotsFailure) {
-            LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
-                ((DeleteSnapshotsFailure) message).cause());
+        } else if (message instanceof Terminated terminated) {
+            actorTerminated(terminated);
+        } else if (message instanceof DeleteSnapshotsSuccess deleteSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+        } else if (message instanceof DeleteSnapshotsFailure deleteFailure) {
+            LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
         } else {
             LOG.debug("Unhandled message [{}]", message);
             unhandled(message);
@@ -173,15 +172,14 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
     }
 
     private void handleSnapshotMessage(final Object message) {
-        if (message instanceof SaveSnapshotFailure) {
-            LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) message).cause());
+        if (message instanceof SaveSnapshotFailure saveFailure) {
+            LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
             persisting = false;
             self().tell(PoisonPill.getInstance(), ActorRef.noSender());
-        } else if (message instanceof SaveSnapshotSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), message);
-            SaveSnapshotSuccess saved = (SaveSnapshotSuccess)message;
-            deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
-                    saved.metadata().timestamp() - 1, 0L, 0L));
+        } else if (message instanceof SaveSnapshotSuccess saveSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), saveSuccess);
+            deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), saveSuccess.metadata().timestamp() - 1,
+                0L, 0L));
             persisting = false;
             unstash();
         } else {
@@ -199,13 +197,13 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
                 incarnation = 0;
             }
 
-            this.localBucket = new LocalBucket<>(incarnation.intValue(), initialData);
+            this.localBucket = new LocalBucket<>(incarnation, initialData);
             initialData = null;
             LOG.debug("{}: persisting new incarnation {}", persistenceId(), incarnation);
             persisting = true;
             saveSnapshot(incarnation);
-        } else if (message instanceof SnapshotOffer) {
-            incarnation = (Integer) ((SnapshotOffer)message).snapshot();
+        } else if (message instanceof SnapshotOffer snapshotOffer) {
+            incarnation = (Integer) snapshotOffer.snapshot();
             LOG.debug("{}: recovered incarnation {}", persistenceId(), incarnation);
         } else {
             LOG.warn("{}: ignoring recovery message {}", persistenceId(), message);
index f43a1d9f9613e3871c6190948c12fb7f7172e1e0..40be108b2e40a5ae821b24bbd50f11c8f04654ed 100644 (file)
@@ -87,7 +87,7 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
 
     Gossiper(final RemoteOpsProviderConfig config, final Boolean autoStartGossipTicks) {
         this.config = requireNonNull(config);
-        this.autoStartGossipTicks = autoStartGossipTicks.booleanValue();
+        this.autoStartGossipTicks = autoStartGossipTicks;
     }
 
     Gossiper(final RemoteOpsProviderConfig config) {
@@ -119,14 +119,19 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         }
 
         if (autoStartGossipTicks) {
-            gossipTask = getContext().system().scheduler().schedule(
-                    new FiniteDuration(1, TimeUnit.SECONDS),        //initial delay
-                    config.getGossipTickInterval(),                 //interval
-                    getSelf(),                                      //target
-                    GOSSIP_TICK,                                    //message
-                    getContext().dispatcher(),                      //execution context
-                    getSelf()                                       //sender
-            );
+            gossipTask = getContext().system().scheduler().scheduleAtFixedRate(
+                // initial delay
+                new FiniteDuration(1, TimeUnit.SECONDS),
+                // interval
+                config.getGossipTickInterval(),
+                // target
+                getSelf(),
+                // message
+                GOSSIP_TICK,
+                // execution context
+                getContext().dispatcher(),
+                // sender
+                getSelf());
         }
     }
 
@@ -146,25 +151,25 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         //These ticks can be sent by another actor as well which is esp. useful while testing
         if (GOSSIP_TICK.equals(message)) {
             receiveGossipTick();
-        } else if (message instanceof GossipStatus) {
+        } else if (message instanceof GossipStatus status) {
             // Message from remote gossiper with its bucket versions
-            receiveGossipStatus((GossipStatus) message);
-        } else if (message instanceof GossipEnvelope) {
+            receiveGossipStatus(status);
+        } else if (message instanceof GossipEnvelope envelope) {
             // Message from remote gossiper with buckets. This is usually in response to GossipStatus
             // message. The contained buckets are newer as determined by the remote gossiper by
             // comparing the GossipStatus message with its local versions.
-            receiveGossip((GossipEnvelope) message);
-        } else if (message instanceof ClusterEvent.MemberUp) {
-            receiveMemberUpOrReachable(((ClusterEvent.MemberUp) message).member());
+            receiveGossip(envelope);
+        } else if (message instanceof ClusterEvent.MemberUp memberUp) {
+            receiveMemberUpOrReachable(memberUp.member());
 
-        } else if (message instanceof ClusterEvent.ReachableMember) {
-            receiveMemberUpOrReachable(((ClusterEvent.ReachableMember) message).member());
+        } else if (message instanceof ClusterEvent.ReachableMember reachableMember) {
+            receiveMemberUpOrReachable(reachableMember.member());
 
-        } else if (message instanceof ClusterEvent.MemberRemoved) {
-            receiveMemberRemoveOrUnreachable(((ClusterEvent.MemberRemoved) message).member());
+        } else if (message instanceof ClusterEvent.MemberRemoved memberRemoved) {
+            receiveMemberRemoveOrUnreachable(memberRemoved.member());
 
-        } else if (message instanceof ClusterEvent.UnreachableMember) {
-            receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
+        } else if (message instanceof ClusterEvent.UnreachableMember unreachableMember) {
+            receiveMemberRemoveOrUnreachable(unreachableMember.member());
 
         } else {
             unhandled(message);
index bfe60e5c8232896efcec8cf7c3c3690d7510c144..a38b8c4d890903f834c7fb33238ee13139e7cc4a 100644 (file)
@@ -13,6 +13,7 @@ import akka.actor.Address;
 import akka.util.Timeout;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Map;
+import java.util.concurrent.TimeoutException;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.remote.rpc.registry.AbstractRoutingTable;
@@ -35,6 +36,8 @@ abstract class AbstractRegistryMXBean<T extends AbstractRoutingTable<T, I>, I> e
     private final BucketStoreAccess bucketAccess;
     private final FiniteDuration timeout;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "registerMBean() is expected to be stateless")
     AbstractRegistryMXBean(final @NonNull String beanName, final @NonNull String beanType,
             final @NonNull BucketStoreAccess bucketAccess, final @NonNull Timeout timeout) {
         super(beanName, beanType, null);
@@ -43,30 +46,30 @@ abstract class AbstractRegistryMXBean<T extends AbstractRoutingTable<T, I>, I> e
         registerMBean();
     }
 
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+    @SuppressWarnings({"unchecked", "rawtypes"})
     final T localData() {
         try {
             return (T) Await.result((Future) bucketAccess.getLocalData(), timeout);
-        } catch (Exception e) {
-            throw new RuntimeException("getLocalData failed", e);
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getLocalData failed", e);
         }
     }
 
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+    @SuppressWarnings({"unchecked", "rawtypes"})
     final Map<Address, Bucket<T>> remoteBuckets() {
         try {
             return (Map<Address, Bucket<T>>) Await.result((Future)bucketAccess.getRemoteBuckets(), timeout);
-        } catch (Exception e) {
-            throw new RuntimeException("getRemoteBuckets failed", e);
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getRemoteBuckets failed", e);
         }
     }
 
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
+    @SuppressWarnings({"unchecked", "rawtypes"})
     final String bucketVersions() {
         try {
             return Await.result((Future)bucketAccess.getBucketVersions(), timeout).toString();
-        } catch (Exception e) {
-            throw new RuntimeException("getVersions failed", e);
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getVersions failed", e);
         }
     }
 }
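
AbstractRegistryMXBean above narrows its catch blocks from a blanket Exception to the checked exceptions Await.result() actually declares, rethrowing them as IllegalStateException. A minimal sketch of that handling against an illustrative already-completed future:

// Awaiting a Scala future with narrowed exception handling (sketch only).
import akka.dispatch.Futures;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;

public final class AwaitExample {
    public static void main(final String[] args) {
        final Future<String> future = Futures.successful("local-data");
        try {
            // Await.result() declares InterruptedException and TimeoutException, so only those are caught.
            System.out.println(Await.result(future, Duration.create(5, TimeUnit.SECONDS)));
        } catch (InterruptedException | TimeoutException e) {
            throw new IllegalStateException("Await.result failed", e);
        }
    }
}
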
index bacde76a4b7189731e2bb15c6188d212173b2d06..8314b13b70c61c8f53ca58792aa5bb21799dc83c 100644 (file)
@@ -19,11 +19,9 @@ import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
 import org.opendaylight.mdsal.dom.api.DOMActionInstance;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 
 public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<ActionRoutingTable, DOMActionInstance>
         implements RemoteActionRegistryMXBean {
-
     public RemoteActionRegistryMXBeanImpl(final BucketStoreAccess actionRegistryAccess, final Timeout timeout) {
         super("RemoteActionRegistry", "RemoteActionBroker", actionRegistryAccess, timeout);
     }
@@ -33,8 +31,7 @@ public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<Actio
         ActionRoutingTable table = localData();
         Set<String> routedAction = new HashSet<>(table.getItems().size());
         for (DOMActionInstance route : table.getItems()) {
-            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(new NodeIdentifier(
-                route.getType().lastNodeIdentifier()));
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
             if (!actionPath.isEmpty()) {
                 routedAction.add(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + route.getType());
             }
@@ -84,8 +81,7 @@ public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<Actio
         Collection<DOMActionInstance> routes = table.getItems();
         Map<String, String> actionMap = new HashMap<>(routes.size());
         for (DOMActionInstance route : routes) {
-            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(new NodeIdentifier(
-                route.getType().lastNodeIdentifier()));
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
             if (!actionPath.isEmpty()) {
                 String routeString = actionPath.toString();
                 if (routeString.contains(routeName)) {
@@ -104,8 +100,7 @@ public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<Actio
         Collection<DOMActionInstance> routes = table.getItems();
         Map<String, String> actionMap = new HashMap<>(routes.size());
         for (DOMActionInstance route : routes) {
-            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.create(new NodeIdentifier(
-                route.getType().lastNodeIdentifier()));
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
             if (!actionPath.isEmpty()) {
                 String type = route.getType().toString();
                 if (type.contains(name)) {
index a3e336f307656f1177898c4c3371c3b2416f79d5..ac495265c5f5937feedbf7a195927f7dec62c0a3 100644 (file)
@@ -16,7 +16,6 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.testkit.javadsl.TestKit;
 import java.net.URI;
-import java.util.Collection;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -28,16 +27,15 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
@@ -58,11 +56,10 @@ public class AbstractOpsTest {
 
 
     static final Absolute TEST_RPC_TYPE = Absolute.of(TEST_RPC);
-    static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.create(
-            new YangInstanceIdentifier.NodeIdentifier(TEST_RPC));
+    static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_RPC);
     public static final DOMRpcIdentifier TEST_RPC_ID = DOMRpcIdentifier.create(TEST_RPC, TEST_PATH);
-    public static final DOMDataTreeIdentifier TEST_DATA_TREE_ID = new DOMDataTreeIdentifier(
-            LogicalDatastoreType.OPERATIONAL, TEST_PATH);
+    public static final DOMDataTreeIdentifier TEST_DATA_TREE_ID =
+        DOMDataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_PATH);
 
     static ActorSystem node1;
     static ActorSystem node2;
@@ -142,21 +139,25 @@ public class AbstractOpsTest {
     }
 
     public static ContainerNode makeRPCInput(final String data) {
-        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
-                .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data)).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
+            .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data))
+            .build();
 
     }
 
     public static ContainerNode makeRPCOutput(final String data) {
-        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
-                .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data)).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
+            .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data))
+            .build();
     }
 
     static void assertFailedRpcResult(final DOMRpcResult rpcResult, final ErrorSeverity severity,
                                       final ErrorType errorType, final String tag, final String message,
                                       final String applicationTag, final String info, final String causeMsg) {
         assertNotNull("RpcResult was null", rpcResult);
-        final Collection<? extends RpcError> rpcErrors = rpcResult.getErrors();
+        final var rpcErrors = rpcResult.errors();
         assertEquals("RpcErrors count", 1, rpcErrors.size());
         assertRpcErrorEquals(rpcErrors.iterator().next(), severity, errorType, tag, message,
                 applicationTag, info, causeMsg);
@@ -164,7 +165,7 @@ public class AbstractOpsTest {
 
     static void assertSuccessfulRpcResult(final DOMRpcResult rpcResult, final NormalizedNode expOutput) {
         assertNotNull("RpcResult was null", rpcResult);
-        assertCompositeNodeEquals(expOutput, rpcResult.getResult());
+        assertCompositeNodeEquals(expOutput, rpcResult.value());
     }
 
     static class TestException extends Exception {
index a4e00591d1fd422612e4b6e4a8809d8047917a84..8e6d17edc7b50f4eb744d574e983afec2a6b6776 100644 (file)
@@ -40,7 +40,7 @@ public class OpsBrokerTest extends AbstractOpsTest {
 
         final RpcResponse rpcResponse = rpcRegistry1Probe.expectMsgClass(Duration.ofSeconds(5), RpcResponse.class);
 
-        assertEquals(rpcResult.getResult(), rpcResponse.getOutput());
+        assertEquals(rpcResult.value(), rpcResponse.getOutput());
     }
 
     @Test
index 5b3555594ac9e59fb99046698c5950efc59d6d8b..8c067fff460a310c9f18cc52579af478134a474c 100644 (file)
@@ -28,8 +28,7 @@ public class OpsListenerTest {
 
     private static final QName TEST_QNAME = QName.create("test", "2015-06-12", "test");
     private static final Absolute RPC_TYPE = Absolute.of(TEST_QNAME);
-    private static final YangInstanceIdentifier TEST_PATH =
-            YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME));
+    private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
     private static final DOMRpcIdentifier RPC_ID = DOMRpcIdentifier.create(TEST_QNAME, TEST_PATH);
     private static final DOMActionInstance ACTION_INSTANCE = DOMActionInstance.of(RPC_TYPE,
             LogicalDatastoreType.OPERATIONAL, TEST_PATH);
index 4569ac68abc275a7e62a885632ebbf14b55da6e3..2dd529f741d33d98949e90a070448a9b659dea73 100644 (file)
@@ -38,9 +38,9 @@ import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMActionInstance;
 import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
@@ -51,9 +51,9 @@ public class OpsRegistrarTest {
     @Mock
     private DOMActionProviderService actionService;
     @Mock
-    private DOMRpcImplementationRegistration<RemoteRpcImplementation> oldReg;
+    private Registration oldReg;
     @Mock
-    private DOMRpcImplementationRegistration<RemoteRpcImplementation> newReg;
+    private Registration newReg;
     @Mock
     private ObjectRegistration<RemoteActionImplementation> oldActionReg;
     @Mock
@@ -84,12 +84,10 @@ public class OpsRegistrarTest {
         final QName firstActionQName = QName.create("first:actionIdentifier", "fooAction");
 
         final DOMActionInstance firstActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
-                LogicalDatastoreType.OPERATIONAL,
-                YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(firstActionQName)));
+                LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
 
         final DOMActionInstance secondActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
-                LogicalDatastoreType.OPERATIONAL,
-                YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(firstActionQName)));
+                LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
 
         final TestKit senderKit = new TestKit(system);
         firstEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(firstEndpointId));
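The registration mocks above switch from the parameterised DOMRpcImplementationRegistration to the plain yangtools Registration contract; the only behaviour the test presumably needs from the mock is close(). A minimal sketch under that assumption (the helper name is illustrative):

    import org.opendaylight.yangtools.concepts.Registration;

    // Any registration, whatever implementation type it used to carry, is
    // retired the same way.
    static void retire(final Registration registration) {
        registration.close();
    }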
index 679a19c78f04268d47402d25f01e382e001aba35..2e94f83866cff960bcdab1467492eba7c7622475 100644 (file)
@@ -7,17 +7,19 @@
  */
 package org.opendaylight.controller.remote.rpc;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThrows;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.when;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Collections;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import org.junit.Ignore;
@@ -51,19 +53,17 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
         final ContainerNode rpcOutput = makeRPCOutput("bar");
         final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
 
-        final NormalizedNode invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode> inputCaptor =
-                ArgumentCaptor.forClass(NormalizedNode.class);
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
 
         doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
             .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
 
         final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
         final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(rpcOutput, result.getResult());
+        assertEquals(rpcOutput, result.value());
     }
 
     /**
@@ -74,17 +74,15 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
         final ContainerNode actionOutput = makeRPCOutput("bar");
         final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput, Collections.emptyList());
         final NormalizedNode invokeActionInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
         final ArgumentCaptor<ContainerNode> inputCaptor =
                 ArgumentCaptor.forClass(ContainerNode.class);
         doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
                 eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
         final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
                 TEST_DATA_TREE_ID, (ContainerNode) invokeActionInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
         final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(actionOutput, result.getOutput().get());
-
+        assertEquals(Optional.of(actionOutput), result.getOutput());
     }
 
     /**
@@ -95,18 +93,16 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
         final ContainerNode rpcOutput = makeRPCOutput("bar");
         final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
 
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
 
         doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
             .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
 
         ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, null);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
         final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(rpcOutput, result.getResult());
+        assertEquals(rpcOutput, result.value());
     }
 
     /**
@@ -117,18 +113,16 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
         final ContainerNode actionOutput = makeRPCOutput("bar");
         final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput);
 
-        @SuppressWarnings({"unchecked", "rawtypes"})
-            final ArgumentCaptor<ContainerNode> inputCaptor =
-                  ArgumentCaptor.forClass(ContainerNode.class);
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
         doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
                 eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
 
         ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
                 TEST_DATA_TREE_ID, actionOutput);
-        assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
 
         final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(actionOutput, result.getOutput().get());
+        assertEquals(Optional.of(actionOutput), result.getOutput());
     }
 
     /**
@@ -139,69 +133,54 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
         final ContainerNode rpcOutput = null;
         final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
 
-        final NormalizedNode invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
 
         doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
             .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
 
         final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
         final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertNull(result.getResult());
+        assertNull(result.value());
     }
 
     /**
      * This test method invokes and executes the remote rpc.
      */
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    @Test(expected = DOMRpcException.class)
-    public void testInvokeRpcWithRemoteFailedFuture() throws Throwable {
-        final NormalizedNode invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
+    @Test
+    public void testInvokeRpcWithRemoteFailedFuture() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
 
-        when(domRpcService2.invokeRpc(eq(TEST_RPC), inputCaptor.capture())).thenReturn(
-                FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)));
+        doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+            .when(domRpcService2).invokeRpc(eq(TEST_RPC), inputCaptor.capture());
 
         final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMRpcException.class));
     }
 
     /**
      * This test method invokes and executes the remote rpc.
      */
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    @Test(expected = DOMActionException.class)
-    public void testInvokeActionWithRemoteFailedFuture() throws Throwable {
+    @Test
+    public void testInvokeActionWithRemoteFailedFuture() {
         final ContainerNode invokeActionInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<ContainerNode> inputCaptor =
-                ArgumentCaptor.forClass(ContainerNode.class);
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
 
-        when(domActionService2.invokeAction(eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID),
-                inputCaptor.capture())).thenReturn(FluentFutures.immediateFailedFluentFuture(
-                        new RemoteDOMRpcException("Test Exception", null)));
+        doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+            .when(domActionService2).invokeAction(eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
 
         final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
                 TEST_DATA_TREE_ID, invokeActionInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
 
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMActionException.class));
     }
 
     /**
@@ -209,44 +188,38 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
      * Currently ignored since this test with current config takes around 15 seconds to complete.
      */
     @Ignore
-    @Test(expected = RemoteDOMRpcException.class)
-    public void testInvokeRpcWithAkkaTimeoutException() throws Exception {
-        final NormalizedNode invokeRpcInput = makeRPCInput("foo");
+    @Test
+    public void testInvokeRpcWithAkkaTimeoutException() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
         final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
-        frontEndFuture.get(20, TimeUnit.SECONDS);
+        assertThrows(RemoteDOMRpcException.class, () -> frontEndFuture.get(20, TimeUnit.SECONDS));
     }
 
     /**
-     * This test method invokes remote rpc and lookup failed
-     * with runtime exception.
+     * This test method invokes a remote rpc whose lookup fails with a runtime exception.
      */
-    @Test(expected = DOMRpcException.class)
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    public void testInvokeRpcWithLookupException() throws Throwable {
-        final NormalizedNode invokeRpcInput = makeRPCInput("foo");
+    @Test
+    public void testInvokeRpcWithLookupException() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
 
         doThrow(new RuntimeException("test")).when(domRpcService2).invokeRpc(any(QName.class),
-            any(NormalizedNode.class));
+            any(ContainerNode.class));
 
         final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
 
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMRpcException.class));
     }
 
     /**
      * This test method invokes remote rpc and lookup failed
      * with runtime exception.
      */
-    @Test(expected = DOMActionException.class)
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    public void testInvokeActionWithLookupException() throws Throwable {
+    @Test
+    public void testInvokeActionWithLookupException() {
         final ContainerNode invokeRpcInput = makeRPCInput("foo");
 
         doThrow(new RuntimeException("test")).when(domActionService2).invokeAction(any(Absolute.class),
@@ -254,12 +227,9 @@ public class RemoteOpsImplementationTest extends AbstractOpsTest {
 
         final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
                 TEST_DATA_TREE_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMActionFuture);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
 
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMActionException.class));
     }
 }
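The failure-path tests above replace @Test(expected = ...) plus rethrowing e.getCause() with assertThrows and a Hamcrest instanceOf check on the unwrapped cause. A minimal sketch of that pattern as a helper a test class could carry (the helper name and the 5-second timeout mirror the tests above but are otherwise illustrative):

    import static org.hamcrest.CoreMatchers.instanceOf;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.junit.Assert.assertThrows;

    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;

    // Wait on the future, expect it to fail, and assert on the unwrapped cause.
    static void assertFailsWith(final ListenableFuture<?> future, final Class<? extends Throwable> expectedCause) {
        final var ex = assertThrows(ExecutionException.class, () -> future.get(5, TimeUnit.SECONDS));
        assertThat(ex.getCause(), instanceOf(expectedCause));
    }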
index 6ced09682e409aa8ec2a5cb1ebae7e1f7cd59918..bba4305a9174982b0e631f490b2118684a456fa3 100644 (file)
@@ -13,6 +13,8 @@ import java.util.ArrayList;
 import java.util.List;
 import org.junit.Before;
 import org.junit.Test;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
@@ -24,10 +26,9 @@ public class RpcErrorsExceptionTest {
 
     @Before
     public void setUp() {
-        final RpcError rpcError = RpcResultBuilder.newError(
-                RpcError.ErrorType.RPC, "error", "error message");
-        final RpcError rpcWarning = RpcResultBuilder.newWarning(
-                RpcError.ErrorType.RPC, "warning", "warning message");
+        final RpcError rpcError = RpcResultBuilder.newError(ErrorType.RPC, new ErrorTag("error"), "error message");
+        final RpcError rpcWarning = RpcResultBuilder.newWarning(ErrorType.RPC, new ErrorTag("warning"),
+            "warning message");
 
         rpcErrors = new ArrayList<>();
         rpcErrors.add(rpcError);
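The RpcResultBuilder factories now take the standalone ErrorType enum plus an ErrorTag wrapper instead of the nested RpcError.ErrorType constant and a bare tag string. A minimal sketch mirroring the arguments used above (the tag and message values are illustrative):

    import org.opendaylight.yangtools.yang.common.ErrorTag;
    import org.opendaylight.yangtools.yang.common.ErrorType;
    import org.opendaylight.yangtools.yang.common.RpcError;
    import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

    // Errors and warnings are built the same way; only the severity factory differs.
    static RpcError exampleError() {
        return RpcResultBuilder.newError(ErrorType.RPC, new ErrorTag("error"), "error message");
    }

    static RpcError exampleWarning() {
        return RpcResultBuilder.newWarning(ErrorType.RPC, new ErrorTag("warning"), "warning message");
    }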
index eafbd0de0bafe808eee2f01a2fa1beb63bdcd3a4..189ca1700779a9ad36613eef07d8c6065311bccb 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.remote.rpc.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
 
@@ -20,7 +20,7 @@ public class ExecuteOpsTest {
         ExecuteRpc expected = ExecuteRpc.from(AbstractOpsTest.TEST_RPC_ID,
                 AbstractOpsTest.makeRPCInput("serialization-test"));
 
-        ExecuteRpc actual = (ExecuteRpc) SerializationUtils.clone(expected);
+        ExecuteRpc actual = SerializationUtils.clone(expected);
 
         assertEquals("getName", expected.getType(), actual.getType());
         assertEquals("getInputNormalizedNode", expected.getInput(), actual.getInput());
index 938f14804221d15f33736a1a545724e6981f2b51..495707557fd8bcfaf0e3063532c5c49b46c79083 100644 (file)
@@ -11,7 +11,7 @@ import static org.junit.Assert.assertEquals;
 
 import java.util.Collections;
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
 
@@ -29,9 +29,9 @@ public class OpsResponseTest {
         ActionResponse expectedAction = new ActionResponse(
             Optional.of(AbstractOpsTest.makeRPCOutput("serialization-test")), Collections.emptyList());
 
-        RpcResponse actualRpc = (RpcResponse) SerializationUtils.clone(expectedRpc);
+        RpcResponse actualRpc = SerializationUtils.clone(expectedRpc);
 
-        ActionResponse actualAction = (ActionResponse) SerializationUtils.clone(expectedAction);
+        ActionResponse actualAction = SerializationUtils.clone(expectedAction);
 
         assertEquals("getResultNormalizedNode", expectedRpc.getOutput(),
                 actualRpc.getOutput());
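Both serialization tests above move from commons-lang to commons-lang3, whose SerializationUtils.clone() is generic, so the casts on the cloned value disappear. A minimal sketch with a hypothetical Serializable payload:

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    // Purely illustrative payload; any Serializable type round-trips the same way.
    record Payload(String name) implements Serializable {
    }

    static Payload roundTrip(final Payload original) {
        // lang3 declares <T extends Serializable> T clone(T), so no cast is needed.
        return SerializationUtils.clone(original);
    }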
index 3ab51193df37f3b1cdd6e366177ecee886c8b7d1..7f2f096e10c1793e38e80fcd15d5cc8e800f672f 100644 (file)
@@ -308,7 +308,7 @@ public class ActionRegistryTest {
         assertNotNull(maybeEndpoint);
         assertTrue(maybeEndpoint.isPresent());
 
-        final RemoteActionEndpoint endpoint = maybeEndpoint.get();
+        final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
         final ActorRef router = endpoint.getRouter();
         assertNotNull(router);
 
@@ -377,7 +377,7 @@ public class ActionRegistryTest {
         for (int i = 0; i < nRoutes; i++) {
             QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
             final DOMActionInstance routeId = DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
-                    YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(type)));
+                    YangInstanceIdentifier.of(type));
             added.add(routeId);
 
             //Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
@@ -412,9 +412,9 @@ public class ActionRegistryTest {
 
     private List<DOMActionInstance> createRouteIds() {
         QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
-        List<DOMActionInstance> routeIds = new ArrayList<>(1);
+        var routeIds = new ArrayList<DOMActionInstance>(1);
         routeIds.add(DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
-            YangInstanceIdentifier.create(new YangInstanceIdentifier.NodeIdentifier(type))));
+            YangInstanceIdentifier.of(type)));
         return routeIds;
     }
 }
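The endpoint lookups above (and the matching RpcRegistryTest hunk below) prefer Optional.orElseThrow() over get(); the two behave identically, but orElseThrow() spells out the failure mode at the call site. A minimal sketch:

    import java.util.Optional;

    static String unwrap(final Optional<String> maybe) {
        // Returns the value when present, otherwise throws NoSuchElementException,
        // exactly like get() but with clearer intent.
        return maybe.orElseThrow();
    }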
index 563bde4b143fc9ad679bcb221d5f449b0b08b522..18b2f9f6e0769383b4c32a8d46c4b364129585f5 100644 (file)
@@ -304,7 +304,7 @@ public class RpcRegistryTest {
         assertNotNull(maybeEndpoint);
         assertTrue(maybeEndpoint.isPresent());
 
-        final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
+        final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
         final ActorRef router = endpoint.getRouter();
         assertNotNull(router);
 
index ed6d12c34d6883d22905ae7f9d025164a8122688..fc364015986085d7351a8ebe9a58d186af1da3d3 100644 (file)
@@ -26,14 +26,12 @@ import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.TerminationMonitor;
 
 public class BucketStoreTest {
-
     /**
      * Dummy class to eliminate rawtype warnings.
      *
      * @author gwu
-     *
      */
-    private static class T implements BucketData<T> {
+    private static final class T implements BucketData<T> {
         @Override
         public Optional<ActorRef> getWatchActor() {
             return Optional.empty();
index 18b443d8fd9deb3028690387e5479e9bad90d502..e53fce46376b1fa090ad874d1a32e171a18024dd 100644 (file)
@@ -51,7 +51,7 @@ public class RemoteActionRegistryMXBeanImplTest {
         system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
 
         final DOMActionInstance emptyActionIdentifier = DOMActionInstance.of(
-                REMOTE_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.empty());
+                REMOTE_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
         final DOMActionInstance localActionIdentifier = DOMActionInstance.of(
                 LOCAL_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(LOCAL_QNAME));
 
index 282b7b5ee49371df7884a9002f5e50c581e8efec..26f4a1fa32deaf136ec760103293c27ea9dc9a38 100644 (file)
@@ -51,7 +51,7 @@ public class RemoteRpcRegistryMXBeanImplTest {
         system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
 
         final DOMRpcIdentifier emptyRpcIdentifier = DOMRpcIdentifier.create(
-                REMOTE_QNAME, YangInstanceIdentifier.empty());
+                REMOTE_QNAME, YangInstanceIdentifier.of());
         final DOMRpcIdentifier localRpcIdentifier = DOMRpcIdentifier.create(
                 LOCAL_QNAME, YangInstanceIdentifier.of(LOCAL_QNAME));
 
index ba1c2d0233bb23a97aa0a3e175e82d448fedacf5..ce09fbb5fba3a16e76911dfe57aa07e5382496d0 100644 (file)
@@ -6,7 +6,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
index f8260f303e1bf864dce16fb0d2b75abbc50bbe54..0fc796cac834888b71c1ff78aa0d70ff69af340d 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller.samples</groupId>
     <artifactId>clustering-it-config</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>jar</packaging>
 
     <build>
index 78df5c088ed13e657a4c219ac3cf5c37f7412cdf..450c703289b3eb2bd977c6b5b73b6019ee20bf9f 100644 (file)
@@ -12,7 +12,7 @@
     <parent>
         <artifactId>mdsal-parent</artifactId>
         <groupId>org.opendaylight.controller</groupId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../../parent/pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-binding-dom-codec-api</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>clustering-it-model</artifactId>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java
new file mode 100644 (file)
index 0000000..b8fcf74
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+
+public abstract class AbstractDOMRpcAction implements Action {
+    @Override
+    @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+    public final Object execute() throws InterruptedException, ExecutionException {
+        final DOMRpcResult result = invokeRpc().get();
+        if (!result.errors().isEmpty()) {
+            // FIXME: is there a better way to report errors?
+            System.out.println("Invocation failed: " + result.errors());
+            return null;
+        } else {
+            return result.value().prettyTree().get();
+        }
+    }
+
+    protected abstract ListenableFuture<? extends DOMRpcResult> invokeRpc();
+}
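A hypothetical subclass sketching how the new AbstractDOMRpcAction base is meant to be used: the command only supplies the DOM RPC invocation, while execute() above handles reporting errors() and pretty-printing value(). Everything below other than AbstractDOMRpcAction and DOMRpcResult is illustrative, and the placeholder future merely keeps the sketch compilable without constructing RPC input:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.apache.karaf.shell.api.action.Command;
    import org.apache.karaf.shell.api.action.lifecycle.Service;
    import org.opendaylight.mdsal.dom.api.DOMRpcResult;

    @Service
    @Command(scope = "test-app", name = "example-dom-rpc", description = "Illustrative only")
    public class ExampleDomRpcCommand extends AbstractDOMRpcAction {
        @Override
        protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
            // A real command would return the future from a DOMRpcService.invokeRpc()
            // call; an immediately failed future stands in for that here.
            return Futures.immediateFailedFuture(new UnsupportedOperationException("illustrative only"));
        }
    }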
index 7590c3c3dc5d70545ffcffe7394931ad86999260..f9759777efc03aeeecd7933bc39eb22860ea6b60 100644 (file)
@@ -38,7 +38,7 @@ public final class DefaultInstanceIdentifierSupport implements InstanceIdentifie
     public DefaultInstanceIdentifierSupport(@Reference final BindingCodecTree bindingCodecTree,
             @Reference final BindingRuntimeContext runtimeContext) {
         bindingCodec = bindingCodecTree.getInstanceIdentifierCodec();
-        jsonCodec = JSONCodecFactorySupplier.RFC7951.createLazy(runtimeContext.getEffectiveModelContext())
+        jsonCodec = JSONCodecFactorySupplier.RFC7951.createLazy(runtimeContext.modelContext())
             .codecFor(new FakeLeafDefinition(), null);
     }
 
index ed42936064ad76d4f8273716b670b3336b1bbde5..13e44e3dbccb13488a354b257b3a9e316e7780a5 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-commit-cohort", description = "Run a register-commit-cohort test")
 public class RegisterCommitCohortCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .registerCommitCohort(new RegisterCommitCohortInputBuilder().build());
+        return rpcService.getRpc(RegisterCommitCohort.class).invoke(new RegisterCommitCohortInputBuilder().build());
     }
 }
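The long run of karaf-cli command updates that starts here follows one pattern: instead of fetching a generated *Service (such as CarService) from RpcConsumerRegistry and calling its per-RPC method, the command asks RpcService for the generated Rpc class and calls invoke() on it. A minimal sketch of the pattern, using only types that appear in the hunk above (the wrapper method is illustrative):

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.mdsal.binding.api.RpcService;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInputBuilder;
    import org.opendaylight.yangtools.yang.common.RpcResult;

    // One lookup per generated Rpc class, then invoke() with the built input.
    static ListenableFuture<? extends RpcResult<?>> registerCommitCohort(final RpcService rpcService) {
        return rpcService.getRpc(RegisterCommitCohort.class)
            .invoke(new RegisterCommitCohortInputBuilder().build());
    }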
index dba686f5e328d194cf670072adf0153dcda10040..2be532865d2a046fd3cc297c8913a604370c9539 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-logging-dtcl", description = "Run a register-logging-dtcl test")
 public class RegisterLoggingDtclCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .registerLoggingDtcl(new RegisterLoggingDtclInputBuilder().build());
+        return rpcService.getRpc(RegisterLoggingDtcl.class).invoke(new RegisterLoggingDtclInputBuilder().build());
     }
 }
index 5af5cd66d9e2420e148a754e48cf7901741d9797..6298a878089a53c3d71a5a29ffd4daab09a86659 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,13 +22,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-ownership", description = "Run a register-ownership test")
 public class RegisterOwnershipCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "car-id", required = true)
     private String carId;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .registerOwnership(new RegisterOwnershipInputBuilder().setCarId(carId).build());
+        return rpcService.getRpc(RegisterOwnership.class)
+            .invoke(new RegisterOwnershipInputBuilder().setCarId(carId).build());
     }
 }
index 7a412685912afab703aa1e3d2acb94c2a1093254..76267112104a06ee792b556facf04318666b09de 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,10 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app" , name = "stop-stress-test", description = "Run a stop-stress-test")
 public class StopStressTestCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class).stopStressTest(new StopStressTestInputBuilder().build());
+        return rpcService.getRpc(StopStressTest.class).invoke(new StopStressTestInputBuilder().build());
     }
 }
index f3bf816d6f46a445b2c51a309afd5617aa286187..8af63e1ac2f7f6ab3c4dedfa8ff147b8d623a5b7 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.Uint16;
@@ -24,7 +24,7 @@ import org.opendaylight.yangtools.yang.common.Uint32;
 @Command(scope = "test-app" , name = "stress-test", description = "Run a stress-test")
 public class StressTestCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "rate", required = true)
     private int rate;
     @Argument(index = 1, name = "count", required = true)
@@ -32,7 +32,7 @@ public class StressTestCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class).stressTest(new StressTestInputBuilder()
+        return rpcService.getRpc(StressTest.class).invoke(new StressTestInputBuilder()
             .setRate(Uint16.valueOf(rate))
             .setCount(Uint32.valueOf(count))
             .build());
index c0e0f4efb8b836ba1b1c2e5f79c9ad35a3eecf6e..95182bc1b17211d2d577b2af0f19a13387447375 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unregister-commit-cohort", description = "Run a unregister-commit-cohort test")
 public class UnregisterCommitCohortCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .unregisterCommitCohort(new UnregisterCommitCohortInputBuilder().build());
+        return rpcService.getRpc(UnregisterCommitCohort.class).invoke(new UnregisterCommitCohortInputBuilder().build());
     }
 }
index e3891fd8832d0545bd93e7ad09273e45fa9884f9..763b8e06516dbdd5b0dd4c963cb439e55a9e6e29 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unregister-logging-dtcls", description = "Run and unregister-logging-dtcls test")
 public class UnregisterLoggingDtclsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .unregisterLoggingDtcls(new UnregisterLoggingDtclsInputBuilder().build());
+        return rpcService.getRpc(UnregisterLoggingDtcls.class).invoke(new UnregisterLoggingDtclsInputBuilder().build());
     }
 }
index bb8bf0e14a5dfac7b6908153f62374dd7e772023..2b9730cb14a2496a49bef9d320bb010d1c3b33fa 100644 (file)
@@ -13,23 +13,22 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
-
 @Service
 @Command(scope = "test-app", name = "unregister-ownership", description = "Run an unregister-ownership test")
 public class UnregisterOwnershipCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "car-id", required = true)
     private String carId;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarService.class)
-            .unregisterOwnership(new UnregisterOwnershipInputBuilder().setCarId(carId).build());
+        return rpcService.getRpc(UnregisterOwnership.class)
+            .invoke(new UnregisterOwnershipInputBuilder().setCarId(carId).build());
     }
 }
index 3290f9de82f7deb2e7a7e4acd5b5ca8be04d63a8..c5b5e67401c4f2a71aef9f4cce4f401d994cc9b8 100644 (file)
@@ -14,9 +14,9 @@ import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
 import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonRef;
@@ -26,7 +26,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "buy-car", description = "Run a buy-car test")
 public class BuyCarCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Reference
     private InstanceIdentifierSupport iidSupport;
     @Argument(index = 0, name = "person-ref", required = true)
@@ -38,7 +38,7 @@ public class BuyCarCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(CarPurchaseService.class).buyCar(new BuyCarInputBuilder()
+        return rpcService.getRpc(BuyCar.class).invoke(new BuyCarInputBuilder()
             .setPerson(new PersonRef(iidSupport.parseArgument(personRef)))
             .setCarId(carId)
             .setPersonId(personId)
index 4169f70de063248a043e3f911e5a100f0f4f80a1..4f55f4c3831e0eaa482e076e235ed5596f56077e 100644 (file)
@@ -13,22 +13,22 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
 @Command(scope = "test-app", name = "add-shard-replica", description = "Run an add-shard-replica test")
 public class AddShardReplicaCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).build());
+        return rpcService.getRpc(AddShardReplica.class)
+            .invoke(new AddShardReplicaInputBuilder().setShardName(shardName).build());
     }
 }
index fe8cf7e1a1eb1940d1a4f053c773ac8c3e54cc76..d26a63570ec62136e2f74318bf62eb02dd026e57 100644 (file)
@@ -13,9 +13,9 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
@@ -23,15 +23,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
          description = "Run a check-publish-notifications test")
 public class CheckPublishNotificationsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private String id;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .checkPublishNotifications(new CheckPublishNotificationsInputBuilder()
-                        .setId(id)
-                        .build());
+        return rpcService.getRpc(CheckPublishNotifications.class)
+                .invoke(new CheckPublishNotificationsInputBuilder().setId(id).build());
     }
 }
index 0b238ee9f2e58c424a4948781b62b5dae1f48e6f..7886dc375203cd8a294bbc68ae6de2a2344de78a 100644 (file)
@@ -12,20 +12,19 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
 @Command(scope = "test-app", name = "is-client-aborted", description = "Run an is-client-aborted test")
 public class IsClientAbortedCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .isClientAborted(new IsClientAbortedInputBuilder().build());
+        return rpcService.getRpc(IsClientAborted.class).invoke(new IsClientAbortedInputBuilder().build());
     }
 }
index ff9105304cb541e9f2024a3532a8ee995fd0fb5b..afc9e824e57b87b757f1bcd04ef4fbf35d44708e 100644 (file)
@@ -14,8 +14,8 @@ import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
 import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -23,7 +23,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-bound-constant", description = "Run a register-bound-constant test")
 public class RegisterBoundConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Reference
     private InstanceIdentifierSupport iidSupport;
     @Argument(index =  0, name = "context", required = true)
@@ -33,8 +33,8 @@ public class RegisterBoundConstantCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .registerBoundConstant(new RegisterBoundConstantInputBuilder()
+        return rpcService.getRpc(RegisterBoundConstant.class)
+                .invoke(new RegisterBoundConstantInputBuilder()
                         .setConstant(constant)
                         .setContext(iidSupport.parseArgument(context))
                         .build());
index b1149493973adcf6e724c201d1643c0595f4e91d..abab942b5716550721ef638e8f742c722b006aee 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,13 +22,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-contact", description = "Run a register-contact test")
 public class RegisterConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index =  0, name = "constant", required = true)
     private String constant;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .registerConstant(new RegisterConstantInputBuilder().setConstant(constant).build());
+        return rpcService.getRpc(RegisterConstant.class)
+                .invoke(new RegisterConstantInputBuilder().setConstant(constant).build());
     }
 }
index 4095c3c29f5f4e86e67e4efa3f529c1aa5f363f3..6896561017b643aa69c28c0c488f1ba0a049a1a3 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,15 +22,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "register-default-constant", description = "Run a register-default-constant test")
 public class RegisterDefaultConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index =  0, name = "constant", required = true)
     private String constant;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .registerDefaultConstant(new RegisterDefaultConstantInputBuilder()
-                        .setConstant(constant)
-                        .build());
+        return rpcService.getRpc(RegisterDefaultConstant.class)
+                .invoke(new RegisterDefaultConstantInputBuilder().setConstant(constant).build());
     }
 }
index 8bd75fb0b7c0f40dc9e42518c643542c60027e45..4ae4288d1614f1b0aca70f33377964c19aba1b0c 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,11 +22,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
          description = "Run a register-flapping-singleton test")
 public class RegisterFlappingSingletonCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .registerFlappingSingleton(new RegisterFlappingSingletonInputBuilder().build());
+        return rpcService.getRpc(RegisterFlappingSingleton.class)
+                .invoke(new RegisterFlappingSingletonInputBuilder().build());
     }
 }
index 64be2ed57711721ff7526f1d5516b9b3767ef23a..7901a2367bddbf09e41bb3e17161112259770409 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -23,16 +23,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
          description = "Run a register-singleton-constant text")
 public class RegisterSingletonConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "constant", required = true)
     private String constant;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .registerSingletonConstant(new RegisterSingletonConstantInputBuilder()
-                        .setConstant(constant)
-                        .build());
-
+        return rpcService.getRpc(RegisterSingletonConstant.class)
+                .invoke(new RegisterSingletonConstantInputBuilder().setConstant(constant).build());
     }
 }
index 2565156812814441352c3977dcd6bcc7751db5b3..33822014dca27c52c7e93a638415193f9216a4b0 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,13 +22,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "remove-shard-replica", description = "Run a remove-shard-replica test")
 public class RemoveShardReplicaCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName(shardName).build());
+        return rpcService.getRpc(RemoveShardReplica.class)
+                .invoke(new RemoveShardReplicaInputBuilder().setShardName(shardName).build());
     }
 }
index f8f025370e7b8a906a847648984f15237896e0c7..79e619bb16dafa310029bba84904a36cc0d1c4ca 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,14 +22,14 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "shutdown-shard-replica", description = " Run a shutdown-shard-replica test")
 public class ShutdownShardReplicaCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "shard-name", required = true)
     private String shardName;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .shutdownShardReplica(new ShutdownShardReplicaInputBuilder()
+        return rpcService.getRpc(ShutdownShardReplica.class)
+                .invoke(new ShutdownShardReplicaInputBuilder()
                         .setShardName(shardName)
                         .build());
     }
index 68be7e30be4ec361062bab552730122cdb61aeda..fdae5a7947a36e04d829b534011d38260c64694d 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.Uint32;
@@ -24,7 +24,7 @@ import org.opendaylight.yangtools.yang.common.Uint32;
          description = "Run a start-publish-notifications test")
 public class StartPublishNotificationsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private String id;
     @Argument(index = 1, name = "seconds", required = true)
@@ -34,8 +34,8 @@ public class StartPublishNotificationsCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .startPublishNotifications(new StartPublishNotificationsInputBuilder()
+        return rpcService.getRpc(StartPublishNotifications.class)
+                .invoke(new StartPublishNotificationsInputBuilder()
                         .setId(id)
                         .setSeconds(Uint32.valueOf(seconds))
                         .setNotificationsPerSecond(Uint32.valueOf(notificationsPerSecond))
index 3a58f63b90f90dcf9d7385295bea2b23dcc586e4..b23b63f3e591e1fe14ef487406a8a76eabd5bb6e 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "subscribe-ddtl", description = "Run a subscribe-ddtl test")
 public class SubscribeDdtlCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .subscribeDdtl(new SubscribeDdtlInputBuilder().build());
+        return rpcService.getRpc(SubscribeDdtl.class).invoke(new SubscribeDdtlInputBuilder().build());
     }
 }
index 4c20e60569f5c13cefb3400294653d875d9560f4..33b5ea138be72b953fd1db374ffcc889b4725074 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "subscribe-dtcl", description = "Run a subscribe-dtcl test")
 public class SubscribeDtclCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .subscribeDtcl(new SubscribeDtclInputBuilder().build());
+        return rpcService.getRpc(SubscribeDtcl.class).invoke(new SubscribeDtclInputBuilder().build());
     }
 }
index 00e6fa4c15f14069a8237786c328868737324015..fc4ab18c08fce2934a907bd38e74e8e53eb8bccc 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,13 +22,12 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "subscribe-ynl", description = "Run a subscribe-ynl test")
 public class SubscribeYnlCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private String id;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .subscribeYnl(new SubscribeYnlInputBuilder().setId(id).build());
+        return rpcService.getRpc(SubscribeYnl.class).invoke(new SubscribeYnlInputBuilder().setId(id).build());
     }
 }
index 3cc086efa677bd1a874357399e8b42c6e5c83138..5274a99156f3665d370d50782a732290a285cfdd 100644 (file)
@@ -14,8 +14,8 @@ import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
 import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -23,7 +23,7 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unregister-bound-constant", description = "Run an unregister-bound-constant test")
 public class UnregisterBoundConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Reference
     private InstanceIdentifierSupport iidSupport;
     @Argument(index = 0, name = "context", required = true)
@@ -31,8 +31,8 @@ public class UnregisterBoundConstantCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unregisterBoundConstant(new UnregisterBoundConstantInputBuilder()
+        return rpcService.getRpc(UnregisterBoundConstant.class)
+                .invoke(new UnregisterBoundConstantInputBuilder()
                         .setContext(iidSupport.parseArgument(context))
                         .build());
     }
index da761b3e3aa3cea6795c011cdfd7309f5e84b67d..ea857dbf6201da594640d65fe5196e45d8ffae7b 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unregister-constant", description = "Run an unregister-constant test")
 public class UnregisterConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unregisterConstant(new UnregisterConstantInputBuilder().build());
+        return rpcService.getRpc(UnregisterConstant.class).invoke(new UnregisterConstantInputBuilder().build());
     }
 }
index 565b72dc33ec753999ce6c6b07e7499bdf6c23ef..d17fad9d678153bf21075c6934ece5eea815933c 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,11 +22,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
          description = "Run an unregister-default-constant test")
 public class UnregisterDefaultConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unregisterDefaultConstant(new UnregisterDefaultConstantInputBuilder().build());
+        return rpcService.getRpc(UnregisterDefaultConstant.class)
+            .invoke(new UnregisterDefaultConstantInputBuilder().build());
     }
 }
index de7cc5b7c2dcf45fdd5078cf4047c2d8d855065a..17954a722da1b70955456871e314b33acb6fc7cc 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,11 +22,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run an unregister-flapping-singleton test")
 public class UnregisterFlappingSingletonCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unregisterFlappingSingleton(new UnregisterFlappingSingletonInputBuilder().build());
+        return rpcService.getRpc(UnregisterFlappingSingleton.class)
+                .invoke(new UnregisterFlappingSingletonInputBuilder().build());
     }
 }
index 7923430f06c6de9c743fe2f24b65a6d3e5795c80..32bd3bc84d97cdcf3ffa7187a2a8aa76c4361e94 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,11 +22,11 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
         description = "Run an unregister-singleton-constant test")
 public class UnregisterSingletonConstantCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unregisterSingletonConstant(new UnregisterSingletonConstantInputBuilder().build());
+        return rpcService.getRpc(UnregisterSingletonConstant.class)
+                .invoke(new UnregisterSingletonConstantInputBuilder().build());
     }
 }
index b44639d423d8259f69fe8dfb323c5767ca17f678..1a6c18f3a005bdf2fa100d7910b6d817cc838ad3 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unsubscribe-ddtl", description = "Run an unsubscribe-ddtl test")
 public class UnsubscribeDdtlCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unsubscribeDdtl(new UnsubscribeDdtlInputBuilder().build());
+        return rpcService.getRpc(UnsubscribeDdtl.class).invoke(new UnsubscribeDdtlInputBuilder().build());
     }
 }
index 99ab3dcce9b922a53c1205df00eab80712e17819..7ad6cc452feafce685d42d0c4a9b5daaf1502c99 100644 (file)
@@ -12,8 +12,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -21,11 +21,10 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unsubscribe-dtcl", description = "Run an unsubscribe-dtcl test")
 public class UnsubscribeDtclCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unsubscribeDtcl(new UnsubscribeDtclInputBuilder().build());
+        return rpcService.getRpc(UnsubscribeDtcl.class).invoke(new UnsubscribeDtclInputBuilder().build());
     }
 }
index cd01e0cc1d99d9a9c4e11425e81f0b3179764b6c..0f83b6ab3b0d976a31ef31133122f06eadee815b 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
@@ -22,15 +22,13 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 @Command(scope = "test-app", name = "unsubscribe-ynl", description = "Run an unsubscribe-ynl test")
 public class UnsubscribeYnlCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private String id;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .unsubscribeYnl(new UnsubscribeYnlInputBuilder()
-                        .setId(id)
-                        .build());
+        return rpcService.getRpc(UnsubscribeYnl.class)
+                .invoke(new UnsubscribeYnlInputBuilder().setId(id).build());
     }
 }
index ad90f8733a8f326a653d2527b962c565e8f169e8..d6da19228ffc78a7371938a2bd809adbcb46abc3 100644 (file)
@@ -13,8 +13,8 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInputBuilder;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.Uint32;
@@ -23,7 +23,7 @@ import org.opendaylight.yangtools.yang.common.Uint32;
 @Command(scope = "test-app", name = "write-transactions", description = "Run a write-transactions test")
 public class WriteTransactionsCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private String id;
     @Argument(index = 1, name = "seconds", required = true)
@@ -35,8 +35,8 @@ public class WriteTransactionsCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(OdlMdsalLowlevelControlService.class)
-                .writeTransactions(new WriteTransactionsInputBuilder()
+        return rpcService.getRpc(WriteTransactions.class)
+                .invoke(new WriteTransactionsInputBuilder()
                         .setId(id)
                         .setSeconds(Uint32.valueOf(seconds))
                         .setTransactionsPerSecond(Uint32.valueOf(transactionsPerSecond))
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java
new file mode 100644 (file)
index 0000000..8cc874a
--- /dev/null
+++ b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-constant", description = "Run an get-constant test")
+public class GetConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode input = serializer.toNormalizedNodeRpcData(new GetConstantInputBuilder().build());
+        return rpcService.invokeRpc(QName.create(GetConstantInput.QNAME, "get-constant"), input);
+    }
+}
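
Unlike the binding-based commands, the new GetConstantCommand goes through the DOM layer: the binding input is serialized to a ContainerNode and DOMRpcService.invokeRpc(QName, ContainerNode) yields a future DOMRpcResult. A hypothetical sketch of how such a future might be consumed by a caller; the helper class, logger and direct executor are illustrative assumptions, not part of the patch:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class DomRpcResultLogger {
    private static final Logger LOG = LoggerFactory.getLogger(DomRpcResultLogger.class);

    private DomRpcResultLogger() {
        // utility class
    }

    static void logWhenComplete(final ListenableFuture<? extends DOMRpcResult> future) {
        Futures.addCallback(future, new FutureCallback<DOMRpcResult>() {
            @Override
            public void onSuccess(final DOMRpcResult result) {
                // The normalized-node output, if any, travels inside the DOMRpcResult.
                LOG.info("DOM RPC completed: {}", result);
            }

            @Override
            public void onFailure(final Throwable cause) {
                LOG.error("DOM RPC invocation failed", cause);
            }
        }, MoreExecutors.directExecutor());
    }
}
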
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java
new file mode 100644 (file)
index 0000000..762daad
--- /dev/null
+++ b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-contexted-constant", description = "Run an get-contexted-constant test")
+public class GetContextedConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+    @Reference
+    private InstanceIdentifierSupport iidSupport;
+    @Argument(index = 0, name = "context", required = true)
+    private String context;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode inputNode = serializer.toNormalizedNodeRpcData(new GetContextedConstantInputBuilder()
+            .setContext(iidSupport.parseArgument(context))
+            .build());
+        return rpcService.invokeRpc(QName.create(GetContextedConstantInput.QNAME, "get-contexted-constant"), inputNode);
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java
new file mode 100644 (file)
index 0000000..1713ecc
--- /dev/null
+++ b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-singleton-constant", description = "Run an get-singleton-constant test")
+public class GetSingletonConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode inputNode =
+                serializer.toNormalizedNodeRpcData(new GetSingletonConstantInputBuilder().build());
+        return rpcService.invokeRpc(QName.create(GetSingletonConstantInput.QNAME, "get-singleton-constant"), inputNode);
+    }
+}
index b6697f5659047e8056d77447b994779a4a6c4ca3..3eaec8db6ad089134f1230f394f395e510708112 100644 (file)
@@ -13,9 +13,9 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.Uint32;
@@ -24,7 +24,7 @@ import org.opendaylight.yangtools.yang.common.Uint32;
 @Command(scope = "test-app", name = "add-person", description = " Run an add-person test")
 public class AddPersonCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
     @Argument(index = 0, name = "id", required = true)
     private PersonId id;
     @Argument(index = 1, name = "gender", required = true)
@@ -38,7 +38,7 @@ public class AddPersonCommand extends AbstractRpcAction {
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(PeopleService.class).addPerson(new AddPersonInputBuilder()
+        return rpcService.getRpc(AddPerson.class).invoke(new AddPersonInputBuilder()
                 .setId(id)
                 .setGender(gender)
                 .setAge(Uint32.valueOf(age))
index fa88ed0c829794e1a433f085bf217fa22078ffab..a61722cc89740bec859a99378f1dc6025cd261c2 100644 (file)
@@ -12,19 +12,19 @@ import org.apache.karaf.shell.api.action.Command;
 import org.apache.karaf.shell.api.action.lifecycle.Reference;
 import org.apache.karaf.shell.api.action.lifecycle.Service;
 import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicRpcTestService;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 @Service
 @Command(scope = "test-app", name = "global-basic", description = "Run a global-basic test")
 public class BasicGlobalCommand extends AbstractRpcAction {
     @Reference
-    private RpcConsumerRegistry rpcService;
+    private RpcService rpcService;
 
     @Override
     protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
-        return rpcService.getRpcService(BasicRpcTestService.class).basicGlobal(new BasicGlobalInputBuilder().build());
+        return rpcService.getRpc(BasicGlobal.class).invoke(new BasicGlobalInputBuilder().build());
     }
 }
index 505b8f62b925e4448b39ea8a86b010d3aff0b9dc..962989a7900ee45e5b7466dfd1598c281ec2bd42 100644 (file)
@@ -5,7 +5,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../../parent</relativePath>
     </parent>
 
index c3a6f6e41929c8f17c237646b21dfa16a3277ae3..b83c335a94814528cee09ae79dd37a116410ea05 100644 (file)
@@ -4,13 +4,13 @@
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>9.0.12</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller.samples</groupId>
     <artifactId>clustering-test-app</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <properties>
index f596af70fb6da3c7915dcfb03e590e3089ad4506..5a4c62eaf67f8abeb8a8d675fd82b84df4450781 100644 (file)
@@ -4,7 +4,7 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>5.0.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../../parent</relativePath>
     </parent>
 
     <packaging>bundle</packaging>
 
     <dependencies>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-eos-binding-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-singleton-common-api</artifactId>
+            <artifactId>mdsal-singleton-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-binding-api</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-common-api</artifactId>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-distributed-datastore</artifactId>
         </dependency>
+        <dependency>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
+            <optional>true</optional>
+        </dependency>
     </dependencies>
 </project>
@@ -7,10 +7,16 @@
  */
 package org.opendaylight.controller.clustering.it.listener;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.MoreExecutors;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
@@ -18,37 +24,49 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PeopleCarListener implements CarPurchaseListener {
-    private static final Logger LOG = LoggerFactory.getLogger(PeopleCarListener.class);
+@Singleton
+@Component(service = { })
+public final class CarBoughtListener implements Listener<CarBought> {
+    private static final Logger LOG = LoggerFactory.getLogger(CarBoughtListener.class);
 
-    private DataBroker dataProvider;
+    private final DataBroker dataProvider;
+    private final Registration reg;
 
-    public void setDataProvider(final DataBroker salDataProvider) {
-        this.dataProvider = salDataProvider;
+    @Inject
+    @Activate
+    public CarBoughtListener(@Reference final DataBroker dataProvider,
+            @Reference final NotificationService notifService) {
+        this.dataProvider = requireNonNull(dataProvider);
+        reg = notifService.registerListener(CarBought.class, this);
     }
 
-    @Override
-    public void onCarBought(final CarBought notification) {
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        reg.close();
+    }
 
-        final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
-        carPersonBuilder.setCarId(notification.getCarId());
-        carPersonBuilder.setPersonId(notification.getPersonId());
-        CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
-        carPersonBuilder.withKey(key);
-        final CarPerson carPerson = carPersonBuilder.build();
+    @Override
+    public void onNotification(final CarBought notification) {
+        final var carPerson = new CarPersonBuilder()
+            .withKey(new CarPersonKey(notification.getCarId(), notification.getPersonId()))
+            .build();
 
         LOG.info("Car bought, adding car-person entry: [{}]", carPerson);
 
-        InstanceIdentifier<CarPerson> carPersonIId = InstanceIdentifier.builder(CarPeople.class)
+        final var carPersonIId = InstanceIdentifier.builder(CarPeople.class)
                 .child(CarPerson.class, carPerson.key()).build();
 
-
-        WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+        final var tx = dataProvider.newWriteOnlyTransaction();
         tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
 
         tx.commit().addCallback(new FutureCallback<CommitInfo>() {
@@ -11,53 +11,87 @@ import static java.util.Objects.requireNonNull;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.HashSet;
 import java.util.Set;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PeopleProvider implements PeopleService, AutoCloseable {
+@Singleton
+@Component(service = { })
+public final class AddPersonImpl implements AddPerson, AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(AddPersonImpl.class);
 
-    private static final Logger LOG = LoggerFactory.getLogger(PeopleProvider.class);
-
-    private final Set<ObjectRegistration<?>> regs = new HashSet<>();
-    private final DataBroker dataProvider;
+    private final Set<Registration> regs = new HashSet<>();
     private final RpcProviderService rpcProviderService;
-    private final CarPurchaseService rpcImplementation;
+    private final DataBroker dataProvider;
+    private final BuyCar buyCarRpc;
 
-    public PeopleProvider(final DataBroker dataProvider, final RpcProviderService rpcProviderService,
-            final CarPurchaseService rpcImplementation) {
+    @Inject
+    @Activate
+    public AddPersonImpl(@Reference final DataBroker dataProvider,
+            @Reference final NotificationPublishService notificationProvider,
+            @Reference final RpcProviderService rpcProviderService) {
         this.dataProvider = requireNonNull(dataProvider);
         this.rpcProviderService = requireNonNull(rpcProviderService);
-        this.rpcImplementation = requireNonNull(rpcImplementation);
 
-        // Add global registration
-        regs.add(rpcProviderService.registerRpcImplementation(CarPurchaseService.class, rpcImplementation));
+        requireNonNull(notificationProvider);
+        buyCarRpc = input -> {
+            LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+            final var carBought = new CarBoughtBuilder()
+                .setCarId(input.getCarId())
+                .setPersonId(input.getPersonId())
+                .build();
+            return Futures.transform(notificationProvider.offerNotification(carBought),
+                result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
+                MoreExecutors.directExecutor());
+        };
+
+        regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc));
+        regs.add(rpcProviderService.registerRpcImplementation(this));
+    }
+
+    @PreDestroy
+    @Deactivate
+    @Override
+    public void close() {
+        regs.forEach(Registration::close);
+        regs.clear();
     }
 
     @Override
-    public ListenableFuture<RpcResult<AddPersonOutput>> addPerson(final AddPersonInput input) {
+    public ListenableFuture<RpcResult<AddPersonOutput>> invoke(final AddPersonInput input) {
         LOG.info("RPC addPerson : adding person [{}]", input);
 
         PersonBuilder builder = new PersonBuilder(input);
@@ -75,8 +109,7 @@ public class PeopleProvider implements PeopleService, AutoCloseable {
             @Override
             public void onSuccess(final CommitInfo result) {
                 LOG.info("RPC addPerson : person added successfully [{}]", person);
-                regs.add(rpcProviderService.registerRpcImplementation(CarPurchaseService.class, rpcImplementation,
-                    ImmutableSet.of(personId)));
+                regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc, ImmutableSet.of(personId)));
                 LOG.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
                 futureResult.set(RpcResultBuilder.success(new AddPersonOutputBuilder().build()).build());
             }
@@ -85,15 +118,9 @@ public class PeopleProvider implements PeopleService, AutoCloseable {
             public void onFailure(final Throwable ex) {
                 LOG.error("RPC addPerson : person addition failed [{}]", person, ex);
                 futureResult.set(RpcResultBuilder.<AddPersonOutput>failed()
-                        .withError(RpcError.ErrorType.APPLICATION, ex.getMessage()).build());
+                        .withError(ErrorType.APPLICATION, ex.getMessage()).build());
             }
         }, MoreExecutors.directExecutor());
         return futureResult;
     }
-
-    @Override
-    public void close() {
-        regs.forEach(ObjectRegistration::close);
-        regs.clear();
-    }
 }
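
AddPersonImpl shows the provider-side half of the same migration: an implementation is now just an instance of the generated Rpc interface (often a lambda, since each interface exposes a single invoke() method), and RpcProviderService.registerRpcImplementation() takes that instance directly, optionally together with a set of context instance identifiers for routed registration. A condensed sketch under those assumptions; GreeterRpc and GreeterOutputBuilder are hypothetical generated bindings:

import com.google.common.collect.ImmutableSet;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

final class GreeterRpcProvider implements AutoCloseable {
    private final Registration globalReg;
    private final Registration routedReg;

    GreeterRpcProvider(final RpcProviderService rpcProviderService, final InstanceIdentifier<?> contextPath) {
        // A generated Rpc interface has one invoke() method, so a lambda is enough.
        final GreeterRpc impl = input ->
            RpcResultBuilder.success(new GreeterOutputBuilder().build()).buildFuture();
        // Global registration: the implementation instance identifies the RPC itself.
        globalReg = rpcProviderService.registerRpcImplementation(impl);
        // Routed registration: same call, plus the context identifiers it serves.
        routedReg = rpcProviderService.registerRpcImplementation(impl, ImmutableSet.of(contextPath));
    }

    @Override
    public void close() {
        routedReg.close();
        globalReg.close();
    }
}
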
index 7934ac90deb3d27b2fb2fd89ffd4d99b949e956e..4c41784eb5b20a70c8547e5e60c1b4ba004a73fe 100644 (file)
@@ -9,42 +9,56 @@ package org.opendaylight.controller.clustering.it.provider;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutput;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicRpcTestService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class BasicRpcTestProvider implements ClusterSingletonService, BasicRpcTestService {
-
+@Singleton
+@Component(service = { })
+public final class BasicRpcTestProvider implements ClusterSingletonService {
     private static final Logger LOG = LoggerFactory.getLogger(BasicRpcTestProvider.class);
-    private static final ServiceGroupIdentifier IDENTIFIER = ServiceGroupIdentifier.create("Basic-rpc-test");
+    private static final ServiceGroupIdentifier IDENTIFIER = new ServiceGroupIdentifier("Basic-rpc-test");
 
     private final RpcProviderService rpcProviderRegistry;
-    private final ClusterSingletonServiceProvider singletonService;
+    private final Registration singletonRegistration;
 
-    private ObjectRegistration<?> rpcRegistration;
+    private Registration rpcRegistration = null;
 
-    public BasicRpcTestProvider(final RpcProviderService rpcProviderRegistry,
-                                final ClusterSingletonServiceProvider singletonService) {
+    @Inject
+    @Activate
+    public BasicRpcTestProvider(@Reference final RpcProviderService rpcProviderRegistry,
+                                @Reference final ClusterSingletonServiceProvider singletonService) {
         this.rpcProviderRegistry = rpcProviderRegistry;
-        this.singletonService = singletonService;
+        singletonRegistration = singletonService.registerClusterSingletonService(this);
+    }
 
-        singletonService.registerClusterSingletonService(this);
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        singletonRegistration.close();
     }
 
     @Override
     public void instantiateServiceInstance() {
         LOG.info("Basic testing rpc registered as global");
-        rpcRegistration = rpcProviderRegistry.registerRpcImplementation(BasicRpcTestService.class, this);
+        rpcRegistration = rpcProviderRegistry.registerRpcImplementation((BasicGlobal) input -> {
+            LOG.info("Basic test global rpc invoked");
+            return RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).buildFuture();
+        });
     }
 
     @Override
@@ -59,11 +73,4 @@ public class BasicRpcTestProvider implements ClusterSingletonService, BasicRpcTe
     public ServiceGroupIdentifier getIdentifier() {
         return IDENTIFIER;
     }
-
-    @Override
-    public ListenableFuture<RpcResult<BasicGlobalOutput>> basicGlobal(final BasicGlobalInput input) {
-        LOG.info("Basic test global rpc invoked");
-
-        return Futures.immediateFuture(RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).build());
-    }
 }
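A minimal sketch of the lambda-based RPC registration this file now uses, assuming only the generated BasicGlobal bindings already imported above; the class BasicGlobalRegistrationSketch and its method names are illustrative, not part of the change:

import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutputBuilder;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

// Illustrative sketch: a single RPC registered as a method reference typed to the
// generated BasicGlobal interface, with the returned Registration kept for cleanup.
final class BasicGlobalRegistrationSketch {
    private final Registration reg;

    BasicGlobalRegistrationSketch(final RpcProviderService rpcProviderService) {
        reg = rpcProviderService.registerRpcImplementation((BasicGlobal) this::invokeBasicGlobal);
    }

    private ListenableFuture<RpcResult<BasicGlobalOutput>> invokeBasicGlobal(final BasicGlobalInput input) {
        // Same behaviour as the lambda above: report success with an empty output.
        return RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).buildFuture();
    }

    void close() {
        // Unregisters the RPC implementation.
        reg.close();
    }
}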
index 6acf6744a0a57d0141de5d2f30b17acb075874ce..c3e9b89393e6dafaa46e8ab6e82006502ca96966 100644 (file)
@@ -7,12 +7,10 @@
  */
 package org.opendaylight.controller.clustering.it.provider;
 
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
+import java.util.List;
 import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -22,36 +20,32 @@ import org.slf4j.LoggerFactory;
  *
  * @author Ryan Goulding (ryandgoulding@gmail.com)
  */
-public class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
+public final class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
     private static final Logger LOG = LoggerFactory.getLogger(CarDataTreeChangeListener.class);
 
-    @java.lang.Override
-    public void onDataTreeChanged(final java.util.Collection<DataTreeModification<Cars>> changes) {
+    @Override
+    public void onDataTreeChanged(final List<DataTreeModification<Cars>> changes) {
         if (LOG.isTraceEnabled()) {
-            for (DataTreeModification<Cars> change : changes) {
+            for (var change : changes) {
                 outputChanges(change);
             }
         }
     }
 
     private static void outputChanges(final DataTreeModification<Cars> change) {
-        final DataObjectModification<Cars> rootNode = change.getRootNode();
-        final ModificationType modificationType = rootNode.getModificationType();
-        final InstanceIdentifier<Cars> rootIdentifier = change.getRootPath().getRootIdentifier();
+        final var rootNode = change.getRootNode();
+        final var modificationType = rootNode.modificationType();
+        final var rootIdentifier = change.getRootPath().path();
         switch (modificationType) {
-            case WRITE:
-            case SUBTREE_MODIFIED: {
+            case WRITE, SUBTREE_MODIFIED -> {
                 LOG.trace("onDataTreeChanged - Cars config with path {} was added or changed from {} to {}",
-                        rootIdentifier, rootNode.getDataBefore(), rootNode.getDataAfter());
-                break;
+                    rootIdentifier, rootNode.dataBefore(), rootNode.dataAfter());
             }
-            case DELETE: {
+            case DELETE -> {
                 LOG.trace("onDataTreeChanged - Cars config with path {} was deleted", rootIdentifier);
-                break;
             }
-            default: {
+            default -> {
                 LOG.trace("onDataTreeChanged called with unknown modificationType: {}", modificationType);
-                break;
             }
         }
     }
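A minimal sketch of how a listener with the new List-based onDataTreeChanged() signature is wired up, using only the DataBroker.registerTreeChangeListener() and DataTreeIdentifier.of() calls that appear later in this change; the class CarListenerWiringSketch and its subscribe() method are illustrative:

import org.opendaylight.controller.clustering.it.provider.CarDataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

// Illustrative sketch: subscribes the listener above to the CONFIGURATION datastore and
// returns the Registration so the subscription can be torn down with close().
final class CarListenerWiringSketch {
    private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.of(
        LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.builder(Cars.class).build());

    static Registration subscribe(final DataBroker dataBroker) {
        // registerTreeChangeListener() delivers List<DataTreeModification<Cars>> batches
        // to onDataTreeChanged(), matching the updated listener signature.
        return dataBroker.registerTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
    }
}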
index bd27bcf0dff30f114a70b65d200e5be119200c89..8c2e0b5f29d55f877c5dac60c8a472dd47927b9c 100644 (file)
@@ -23,8 +23,8 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,40 +33,39 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
+public final class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
     private static final Logger LOG = LoggerFactory.getLogger(CarEntryDataTreeCommitCohort.class);
 
     private static final QName YEAR_QNAME = QName.create(Cars.QNAME, "year").intern();
     private static final NodeIdentifier YEAR_NODE_ID = new NodeIdentifier(YEAR_QNAME);
 
     @Override
-    public FluentFuture<PostCanCommitStep> canCommit(final Object txId, final SchemaContext ctx,
+    public FluentFuture<PostCanCommitStep> canCommit(final Object txId, final EffectiveModelContext ctx,
             final Collection<DOMDataTreeCandidate> candidates) {
 
         for (DOMDataTreeCandidate candidate : candidates) {
             // Simple data validation - verify the year, if present, is >= 1990
 
             final DataTreeCandidateNode rootNode = candidate.getRootNode();
-            final Optional<NormalizedNode> dataAfter = rootNode.getDataAfter();
+            final NormalizedNode dataAfter = rootNode.dataAfter();
 
             LOG.info("In canCommit: modificationType: {}, dataBefore: {}, dataAfter: {}",
-                    rootNode.getModificationType(), rootNode.getDataBefore(), dataAfter);
+                    rootNode.modificationType(), rootNode.dataBefore(), dataAfter);
 
             // Note: we don't want to process DELETE modifications but we don't need to explicitly check the
             // ModificationType because dataAfter will not be present. Also dataAfter *should* always contain a
             // MapEntryNode but we verify anyway.
-            if (dataAfter.isPresent()) {
-                final NormalizedNode normalizedNode = dataAfter.get();
-                Verify.verify(normalizedNode instanceof DataContainerNode,
-                        "Expected type DataContainerNode, actual was %s", normalizedNode.getClass());
-                DataContainerNode entryNode = (DataContainerNode) normalizedNode;
+            if (dataAfter != null) {
+                Verify.verify(dataAfter instanceof DataContainerNode,
+                        "Expected type DataContainerNode, actual was %s", dataAfter.getClass());
+                DataContainerNode entryNode = (DataContainerNode) dataAfter;
                 final Optional<DataContainerChild> possibleYear = entryNode.findChildByArg(YEAR_NODE_ID);
                 if (possibleYear.isPresent()) {
-                    final Number year = (Number) possibleYear.get().body();
+                    final Number year = (Number) possibleYear.orElseThrow().body();
 
                     LOG.info("year is {}", year);
 
-                    if ((year.longValue() < 1990)) {
+                    if (year.longValue() < 1990) {
                         return FluentFutures.immediateFailedFluentFuture(new DataValidationFailedException(
                                 DOMDataTreeIdentifier.class, candidate.getRootPath(),
                                 String.format("Invalid year %d - year must be >= 1990", year)));
index 3f8bff0992709710bcfac271f4b1d5ef455805e6..bec65aa561a3357daf980b58a6f6ebbda36f8db5 100644 (file)
@@ -12,7 +12,7 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -21,57 +21,71 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.eos.binding.api.Entity;
-import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipChange;
 import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipListener;
 import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService;
 import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntry;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntryBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.Uint32;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -80,15 +94,14 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-@SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS")
-public class CarProvider implements CarService {
-    private static final Logger LOG_PURCHASE_CAR = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
-    private static final Logger LOG_CAR_PROVIDER = LoggerFactory.getLogger(CarProvider.class);
+@Singleton
+@Component(service = { })
+public final class CarProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(CarProvider.class);
 
     private static final String ENTITY_TYPE = "cars";
     private static final InstanceIdentifier<Cars> CARS_IID = InstanceIdentifier.builder(Cars.class).build();
-    private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.create(
+    private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.of(
             LogicalDatastoreType.CONFIGURATION, CARS_IID);
 
     private final DataBroker dataProvider;
@@ -97,28 +110,44 @@ public class CarProvider implements CarService {
     private final AtomicLong succcessCounter = new AtomicLong();
     private final AtomicLong failureCounter = new AtomicLong();
 
-    private final CarEntityOwnershipListener ownershipListener = new CarEntityOwnershipListener();
-    private final AtomicBoolean registeredListener = new AtomicBoolean();
+    private final EntityOwnershipListener ownershipListener = (entity, change, inJeopardy) ->
+        LOG.info("ownershipChanged: entity={} change={} inJeopardy={}", entity, change, inJeopardy);
 
-    private final Set<ListenerRegistration<?>> carsDclRegistrations = ConcurrentHashMap.newKeySet();
-    private final Set<ListenerRegistration<CarDataTreeChangeListener>> carsDtclRegistrations =
-            ConcurrentHashMap.newKeySet();
+    private final AtomicBoolean registeredListener = new AtomicBoolean();
+    private final AtomicReference<Registration> commitCohortReg = new AtomicReference<>();
+    private final Set<ObjectRegistration<?>> carsDclRegistrations = ConcurrentHashMap.newKeySet();
+    private final Set<Registration> regs = new HashSet<>();
+    private final Set<Registration> carsDtclRegistrations = ConcurrentHashMap.newKeySet();
 
     private volatile Thread testThread;
     private volatile boolean stopThread;
-    private final AtomicReference<DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort>> commitCohortReg =
-            new AtomicReference<>();
 
-    public CarProvider(final DataBroker dataProvider, final EntityOwnershipService ownershipService,
-            final DOMDataBroker domDataBroker) {
+    @Inject
+    @Activate
+    public CarProvider(@Reference final DataBroker dataProvider,
+            @Reference final EntityOwnershipService ownershipService, @Reference final DOMDataBroker domDataBroker,
+            @Reference final RpcProviderService rpcProviderService) {
         this.dataProvider = dataProvider;
         this.ownershipService = ownershipService;
         this.domDataBroker = domDataBroker;
+        regs.add(rpcProviderService.registerRpcImplementations(
+            (StressTest) this::stressTest,
+            (StopStressTest) this::stopStressTest,
+            (RegisterOwnership) this::registerOwnership,
+            (UnregisterOwnership) this::unregisterOwnership,
+            (RegisterLoggingDtcl) this::registerLoggingDtcl,
+            (UnregisterLoggingDtcls) this::unregisterLoggingDtcls,
+            (RegisterCommitCohort) this::registerCommitCohort,
+            (UnregisterCommitCohort) this::unregisterCommitCohort));
     }
 
+    @PreDestroy
+    @Deactivate
     public void close() {
         stopThread();
         closeCommitCohortRegistration();
+        regs.forEach(Registration::close);
+        regs.clear();
     }
 
     private void stopThread() {
@@ -134,14 +163,13 @@ public class CarProvider implements CarService {
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
+    private ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
         final int inputRate;
         final long inputCount;
 
         // If rate is not provided, or given as zero, then just return.
         if (input.getRate() == null || input.getRate().toJava() == 0) {
-            LOG_PURCHASE_CAR.info("Exiting stress test as no rate is given.");
+            LOG.info("Exiting stress test as no rate is given.");
             return Futures.immediateFuture(RpcResultBuilder.<StressTestOutput>failed()
                     .withError(ErrorType.PROTOCOL, "invalid rate")
                     .build());
@@ -154,7 +182,7 @@ public class CarProvider implements CarService {
             inputCount = 0;
         }
 
-        LOG_PURCHASE_CAR.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
+        LOG.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
 
         stopThread();
         // clear counters
@@ -167,7 +195,7 @@ public class CarProvider implements CarService {
         try {
             tx.commit().get(5, TimeUnit.SECONDS);
         } catch (TimeoutException | InterruptedException | ExecutionException e) {
-            LOG_PURCHASE_CAR.error("Put Cars failed",e);
+            LOG.error("Put Cars failed",e);
             return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
         }
 
@@ -195,7 +223,7 @@ public class CarProvider implements CarService {
                     public void onFailure(final Throwable ex) {
                         // Transaction failed
                         failureCounter.getAndIncrement();
-                        LOG_CAR_PROVIDER.error("Put Cars failed", ex);
+                        LOG.error("Put Cars failed", ex);
                     }
                 }, MoreExecutors.directExecutor());
                 try {
@@ -205,7 +233,7 @@ public class CarProvider implements CarService {
                 }
 
                 if (count.get() % 1000 == 0) {
-                    LOG_PURCHASE_CAR.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
+                    LOG.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
                 }
 
                 // Check if a count is specified in input and we have created that many cars.
@@ -214,15 +242,14 @@ public class CarProvider implements CarService {
                 }
             }
 
-            LOG_PURCHASE_CAR.info("Stress test thread stopping after creating {} cars.", count.get());
+            LOG.info("Stress test thread stopping after creating {} cars.", count.get());
         });
         testThread.start();
 
         return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
+    private ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
         stopThread();
         StopStressTestOutputBuilder stopStressTestOutput;
         stopStressTestOutput = new StopStressTestOutputBuilder()
@@ -230,17 +257,15 @@ public class CarProvider implements CarService {
                 .setFailureCount(Uint32.valueOf(failureCounter.longValue()));
 
         final StopStressTestOutput result = stopStressTestOutput.build();
-        LOG_PURCHASE_CAR.info("Executed Stop Stress test; No. of cars created {}; "
-                + "No. of cars failed {}; ", succcessCounter, failureCounter);
+        LOG.info("Executed Stop Stress test; No. of cars created {}; No. of cars failed {}; ",
+            succcessCounter, failureCounter);
         // clear counters
         succcessCounter.set(0);
         failureCounter.set(0);
         return Futures.immediateFuture(RpcResultBuilder.<StopStressTestOutput>success(result).build());
     }
 
-
-    @Override
-    public ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
+    private ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
         if (registeredListener.compareAndSet(false, true)) {
             ownershipService.registerListener(ENTITY_TYPE, ownershipListener);
         }
@@ -256,49 +281,36 @@ public class CarProvider implements CarService {
         return RpcResultBuilder.success(new RegisterOwnershipOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
+    private ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
             final UnregisterOwnershipInput input) {
         return RpcResultBuilder.success(new UnregisterOwnershipOutputBuilder().build()).buildFuture();
     }
 
-    private static class CarEntityOwnershipListener implements EntityOwnershipListener {
-        @Override
-        public void ownershipChanged(final EntityOwnershipChange ownershipChange) {
-            LOG_CAR_PROVIDER.info("ownershipChanged: {}", ownershipChange);
-        }
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
+    private ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
             final RegisterLoggingDtclInput input) {
-        LOG_CAR_PROVIDER.info("Registering a new CarDataTreeChangeListener");
-        final ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration =
-                dataProvider.registerDataTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
-
-        carsDtclRegistrations.add(carsDtclRegistration);
+        LOG.info("Registering a new CarDataTreeChangeListener");
+        final var reg = dataProvider.registerTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
+        carsDtclRegistrations.add(reg);
         return RpcResultBuilder.success(new RegisterLoggingDtclOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
+    private ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
             final UnregisterLoggingDtclsInput input) {
-        LOG_CAR_PROVIDER.info("Unregistering the CarDataTreeChangeListener(s)");
+        LOG.info("Unregistering the CarDataTreeChangeListener(s)");
         synchronized (carsDtclRegistrations) {
             int numListeners = 0;
-            for (ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration : carsDtclRegistrations) {
+            for (var carsDtclRegistration : carsDtclRegistrations) {
                 carsDtclRegistration.close();
                 numListeners++;
             }
             carsDtclRegistrations.clear();
-            LOG_CAR_PROVIDER.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
+            LOG.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
         }
         return RpcResultBuilder.success(new UnregisterLoggingDtclsOutputBuilder().build()).buildFuture();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
+    private ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
             final UnregisterCommitCohortInput input) {
         closeCommitCohortRegistration();
 
@@ -306,23 +318,20 @@ public class CarProvider implements CarService {
     }
 
     private void closeCommitCohortRegistration() {
-        final DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort> reg = commitCohortReg.getAndSet(null);
+        final var reg = commitCohortReg.getAndSet(null);
         if (reg != null) {
             reg.close();
-            LOG_CAR_PROVIDER.info("Unregistered commit cohort");
+            LOG.info("Unregistered commit cohort");
         }
     }
 
-    @Override
-    public synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
+    private synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
             final RegisterCommitCohortInput input) {
         if (commitCohortReg.get() != null) {
             return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
         }
 
-        final DOMDataTreeCommitCohortRegistry commitCohortRegistry = domDataBroker.getExtensions().getInstance(
-            DOMDataTreeCommitCohortRegistry.class);
-
+        final var commitCohortRegistry = domDataBroker.extension(CommitCohortExtension.class);
         if (commitCohortRegistry == null) {
             // Shouldn't happen
             return RpcResultBuilder.<RegisterCommitCohortOutput>failed().withError(ErrorType.APPLICATION,
@@ -337,10 +346,10 @@ public class CarProvider implements CarService {
         // to address all list entries, the second path argument is wild-carded by specifying just the CarEntry.QNAME.
         final YangInstanceIdentifier carEntryPath = YangInstanceIdentifier.builder(
                 YangInstanceIdentifier.of(Cars.QNAME)).node(CarEntry.QNAME).node(CarEntry.QNAME).build();
-        commitCohortReg.set(commitCohortRegistry.registerCommitCohort(new DOMDataTreeIdentifier(
+        commitCohortReg.set(commitCohortRegistry.registerCommitCohort(DOMDataTreeIdentifier.of(
             LogicalDatastoreType.CONFIGURATION, carEntryPath), new CarEntryDataTreeCommitCohort()));
 
-        LOG_CAR_PROVIDER.info("Registered commit cohort");
+        LOG.info("Registered commit cohort");
 
         return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
     }
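A minimal sketch of the consolidated registerRpcImplementations() call introduced above, reduced to two of the RPCs for brevity; the class CarRpcRegistrationSketch and its trivial method bodies are illustrative only:

import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutputBuilder;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

// Illustrative sketch: several RPC implementations bundled into one registration; closing
// the single Registration unregisters all of them, as done in close() above.
final class CarRpcRegistrationSketch {
    private final Registration reg;

    CarRpcRegistrationSketch(final RpcProviderService rpcProviderService) {
        reg = rpcProviderService.registerRpcImplementations(
            (StressTest) this::stressTest,
            (StopStressTest) this::stopStressTest);
    }

    private ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
        // Placeholder body: a real implementation would drive the datastore as above.
        return RpcResultBuilder.success(new StressTestOutputBuilder().build()).buildFuture();
    }

    private ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
        return RpcResultBuilder.success(new StopStressTestOutputBuilder().build()).buildFuture();
    }

    void close() {
        reg.close();
    }
}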
index 8e26cb6f957470edd036b537cd601275c84a5298..10dafba452be749dbafd447f93c70b65ccd2f1e3 100644 (file)
@@ -8,11 +8,10 @@
 package org.opendaylight.controller.clustering.it.provider;
 
 import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
+import akka.dispatch.Futures;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import com.google.common.base.Strings;
-import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.HashMap;
@@ -21,7 +20,9 @@ import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
@@ -39,150 +40,201 @@ import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
-public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService {
+@Singleton
+@Component(service = {})
+public final class MdsalLowLevelTestProvider {
     private static final Logger LOG = LoggerFactory.getLogger(MdsalLowLevelTestProvider.class);
 
-    private final RpcProviderService rpcRegistry;
-    private final ObjectRegistration<OdlMdsalLowlevelControlService> registration;
+    private final Registration registration;
     private final DistributedDataStoreInterface configDataStore;
     private final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer;
     private final DOMDataBroker domDataBroker;
     private final NotificationPublishService notificationPublishService;
     private final NotificationService notificationService;
-    private final DOMSchemaService schemaService;
     private final ClusterSingletonServiceProvider singletonService;
     private final DOMRpcProviderService domRpcService;
-    private final DOMDataTreeChangeService domDataTreeChangeService;
-    private final ActorSystem actorSystem;
+    private final DataTreeChangeExtension dataTreeChangeExtension;
 
-    private final Map<InstanceIdentifier<?>, DOMRpcImplementationRegistration<RoutedGetConstantService>>
-            routedRegistrations = new HashMap<>();
-
-    private final Map<String, ListenerRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+    private final Map<InstanceIdentifier<?>, Registration> routedRegistrations = new HashMap<>();
+    private final Map<String, ObjectRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+    private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
 
-    private DOMRpcImplementationRegistration<GetConstantService> globalGetConstantRegistration = null;
-    private ClusterSingletonServiceRegistration getSingletonConstantRegistration;
+    private Registration globalGetConstantRegistration = null;
+    private Registration getSingletonConstantRegistration;
     private FlappingSingletonService flappingSingletonService;
-    private ListenerRegistration<DOMDataTreeChangeListener> dtclReg;
+    private Registration dtclReg;
     private IdIntsListener idIntsListener;
-    private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
 
-    public MdsalLowLevelTestProvider(final RpcProviderService rpcRegistry,
-                                     final DOMRpcProviderService domRpcService,
-                                     final ClusterSingletonServiceProvider singletonService,
-                                     final DOMSchemaService schemaService,
-                                     final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
-                                     final NotificationPublishService notificationPublishService,
-                                     final NotificationService notificationService,
-                                     final DOMDataBroker domDataBroker,
-                                     final DistributedDataStoreInterface configDataStore,
-                                     final ActorSystemProvider actorSystemProvider) {
-        this.rpcRegistry = rpcRegistry;
+    @Inject
+    @Activate
+    public MdsalLowLevelTestProvider(
+            @Reference final RpcProviderService rpcRegistry,
+            @Reference final DOMRpcProviderService domRpcService,
+            @Reference final ClusterSingletonServiceProvider singletonService,
+            @Reference final DOMSchemaService schemaService,
+            @Reference final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
+            @Reference final NotificationPublishService notificationPublishService,
+            @Reference final NotificationService notificationService,
+            @Reference final DOMDataBroker domDataBroker,
+            @Reference final DistributedDataStoreInterface configDataStore) {
         this.domRpcService = domRpcService;
         this.singletonService = singletonService;
-        this.schemaService = schemaService;
         this.bindingNormalizedNodeSerializer = bindingNormalizedNodeSerializer;
         this.notificationPublishService = notificationPublishService;
         this.notificationService = notificationService;
         this.domDataBroker = domDataBroker;
         this.configDataStore = configDataStore;
-        this.actorSystem = actorSystemProvider.getActorSystem();
 
-        domDataTreeChangeService = domDataBroker.getExtensions().getInstance(DOMDataTreeChangeService.class);
+        dataTreeChangeExtension = domDataBroker.extension(DataTreeChangeExtension.class);
+
+        registration = rpcRegistry.registerRpcImplementations(
+            (UnregisterSingletonConstant) this::unregisterSingletonConstant,
+            (StartPublishNotifications) this::startPublishNotifications,
+            (SubscribeDdtl) this::subscribeDdtl,
+            (WriteTransactions) this::writeTransactions,
+            (IsClientAborted) this::isClientAborted,
+            (RemoveShardReplica) this::removeShardReplica,
+            (SubscribeYnl) this::subscribeYnl,
+            (UnregisterBoundConstant) this::unregisterBoundConstant,
+            (RegisterSingletonConstant) this::registerSingletonConstant,
+            (RegisterDefaultConstant) this::registerDefaultConstant,
+            (UnregisterConstant) this::unregisterConstant,
+            (UnregisterFlappingSingleton) this::unregisterFlappingSingleton,
+            (AddShardReplica) this::addShardReplica,
+            (RegisterBoundConstant) this::registerBoundConstant,
+            (RegisterFlappingSingleton) this::registerFlappingSingleton,
+            (UnsubscribeDdtl) this::unsubscribeDdtl,
+            (UnsubscribeYnl) this::unsubscribeYnl,
+            (CheckPublishNotifications) this::checkPublishNotifications,
+            (ShutdownShardReplica) this::shutdownShardReplica,
+            (RegisterConstant) this::registerConstant,
+            (UnregisterDefaultConstant) this::unregisterDefaultConstant,
+            (SubscribeDtcl) this::subscribeDtcl,
+            (UnsubscribeDtcl) this::unsubscribeDtcl);
+    }
 
-        registration = rpcRegistry.registerRpcImplementation(OdlMdsalLowlevelControlService.class, this);
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        registration.close();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
+    private ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
             final UnregisterSingletonConstantInput input) {
         LOG.info("In unregisterSingletonConstant");
 
         if (getSingletonConstantRegistration == null) {
-            return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed().withError(ErrorType.RPC, "data-missing",
-                    "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         try {
@@ -198,8 +250,7 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
+    private ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
             final StartPublishNotificationsInput input) {
         LOG.info("In startPublishNotifications - input: {}", input);
 
@@ -213,81 +264,85 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new StartPublishNotificationsOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
+    private ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
         LOG.info("In subscribeDtcl - input: {}", input);
 
         if (dtclReg != null) {
-            return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already a DataTreeChangeListener registered for id-ints").buildFuture();
+            return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+                "There is already a DataTreeChangeListener registered for id-ints")
+                .buildFuture();
         }
 
         idIntsListener = new IdIntsListener();
 
-        dtclReg = domDataTreeChangeService.registerDataTreeChangeListener(
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, WriteTransactionsHandler.ID_INT_YID),
+        dtclReg = dataTreeChangeExtension.registerTreeChangeListener(
+            DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, WriteTransactionsHandler.ID_INT_YID),
             idIntsListener);
 
         return RpcResultBuilder.success(new SubscribeDtclOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
+    private ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
         return WriteTransactionsHandler.start(domDataBroker, input);
     }
 
-    @Override
-    public ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
+    private ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
+    private ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
             final RemoveShardReplicaInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
+    private ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
         LOG.info("In subscribeYnl - input: {}", input);
 
         if (ynlRegistrations.containsKey(input.getId())) {
-            return RpcResultBuilder.<SubscribeYnlOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already a listener registered for id: " + input.getId()).buildFuture();
+            return RpcResultBuilder.<SubscribeYnlOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+                    "There is already a listener registered for id: " + input.getId())
+                .buildFuture();
         }
 
-        ynlRegistrations.put(input.getId(),
-                notificationService.registerNotificationListener(new YnlListener(input.getId())));
+        final var id = input.getId();
+        final var listener = new YnlListener(id);
+        final var reg = notificationService.registerListener(IdSequence.class, listener);
+        ynlRegistrations.put(id, new AbstractObjectRegistration<>(listener) {
+            @Override
+            protected void removeRegistration() {
+                reg.close();
+            }
+        });
 
         return RpcResultBuilder.success(new SubscribeYnlOutputBuilder().build()).buildFuture();
     }
 
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
+    private ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
             final UnregisterBoundConstantInput input) {
         LOG.info("In unregisterBoundConstant - {}", input);
 
-        final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
-                routedRegistrations.remove(input.getContext());
-
+        final var rpcRegistration = routedRegistrations.remove(input.getContext());
         if (rpcRegistration == null) {
-            return RpcResultBuilder.<UnregisterBoundConstantOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered for " + input.getContext()).buildFuture();
+            return RpcResultBuilder.<UnregisterBoundConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+                    "No prior RPC was registered for " + input.getContext())
+                .buildFuture();
         }
 
         rpcRegistration.close();
         return RpcResultBuilder.success(new UnregisterBoundConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
+    private ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
             final RegisterSingletonConstantInput input) {
         LOG.info("In registerSingletonConstant - input: {}", input);
 
         if (input.getConstant() == null) {
-            return RpcResultBuilder.<RegisterSingletonConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+            return RpcResultBuilder.<RegisterSingletonConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+                .buildFuture();
         }
 
         getSingletonConstantRegistration =
@@ -296,36 +351,35 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new RegisterSingletonConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
+    private ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
             final RegisterDefaultConstantInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
+    private ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
             final UnregisterConstantInput input) {
         LOG.info("In unregisterConstant");
 
         if (globalGetConstantRegistration == null) {
-            return RpcResultBuilder.<UnregisterConstantOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         globalGetConstantRegistration.close();
         globalGetConstantRegistration = null;
 
-        return Futures.immediateFuture(RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).build());
+        return RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
+    private ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
             final UnregisterFlappingSingletonInput input) {
         LOG.info("In unregisterFlappingSingleton");
 
         if (flappingSingletonService == null) {
-            return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         final long flapCount = flappingSingletonService.setInactive();
@@ -335,52 +389,49 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
                 .buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+    private ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
         throw new UnsupportedOperationException();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
+    private ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
         throw new UnsupportedOperationException();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
+    private ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
             final RegisterBoundConstantInput input) {
         LOG.info("In registerBoundConstant - input: {}", input);
 
         if (input.getContext() == null) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Context value is null").buildFuture();
+                    ErrorType.RPC, ErrorTag.INVALID_VALUE, "Context value is null").buildFuture();
         }
 
         if (input.getConstant() == null) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+                    ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null").buildFuture();
         }
 
         if (routedRegistrations.containsKey(input.getContext())) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already an rpc registered for context: " + input.getContext()).buildFuture();
+                ErrorTag.DATA_EXISTS, "There is already an rpc registered for context: " + input.getContext())
+                .buildFuture();
         }
 
-        final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
-                RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
-                        input.getConstant(), input.getContext());
+        final var rpcRegistration = RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
+            input.getConstant(), input.getContext());
 
         routedRegistrations.put(input.getContext(), rpcRegistration);
         return RpcResultBuilder.success(new RegisterBoundConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
+    private ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
             final RegisterFlappingSingletonInput input) {
         LOG.info("In registerFlappingSingleton");
 
         if (flappingSingletonService != null) {
-            return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already an rpc registered").buildFuture();
+            return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+                .buildFuture();
         }
 
         flappingSingletonService = new FlappingSingletonService(singletonService);
@@ -388,13 +439,13 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new RegisterFlappingSingletonOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
+    private ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
         LOG.info("In unsubscribeDtcl");
 
         if (idIntsListener == null || dtclReg == null) {
-            return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(
-                    ErrorType.RPC, "data-missing", "No prior listener was registered").buildFuture();
+            return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior listener was registered")
+                .buildFuture();
         }
 
         long timeout = 120L;
@@ -410,8 +461,10 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         dtclReg = null;
 
         if (!idIntsListener.hasTriggered()) {
-            return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "operation-failed",
-                    "id-ints listener has not received any notifications.").buildFuture();
+            return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                .withError(ErrorType.APPLICATION, ErrorTag.OPERATION_FAILED,
+                    "id-ints listener has not received any notifications.")
+                .buildFuture();
         }
 
         try (DOMDataTreeReadTransaction rTx = domDataBroker.newReadOnlyTransaction()) {
@@ -419,17 +472,18 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
                 WriteTransactionsHandler.ID_INT_YID).get();
 
             if (!readResult.isPresent()) {
-                return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "data-missing",
-                        "No data read from id-ints list").buildFuture();
+                return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                    .withError(ErrorType.APPLICATION, ErrorTag.DATA_MISSING, "No data read from id-ints list")
+                    .buildFuture();
             }
 
-            final boolean nodesEqual = idIntsListener.checkEqual(readResult.get());
+            final boolean nodesEqual = idIntsListener.checkEqual(readResult.orElseThrow());
             if (!nodesEqual) {
                 LOG.error("Final read of id-int does not match IdIntsListener's copy. {}",
-                        idIntsListener.diffWithLocalCopy(readResult.get()));
+                        idIntsListener.diffWithLocalCopy(readResult.orElseThrow()));
             }
 
-            return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual))
+            return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual).build())
                     .buildFuture();
 
         } catch (final InterruptedException | ExecutionException e) {
@@ -439,33 +493,32 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
+    private ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
         LOG.info("In unsubscribeYnl - input: {}", input);
 
         if (!ynlRegistrations.containsKey(input.getId())) {
-            return RpcResultBuilder.<UnsubscribeYnlOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior listener was registered for " + input.getId()).buildFuture();
+            return RpcResultBuilder.<UnsubscribeYnlOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+                    "No prior listener was registered for " + input.getId())
+                .buildFuture();
         }
 
-        final ListenerRegistration<YnlListener> reg = ynlRegistrations.remove(input.getId());
-        final UnsubscribeYnlOutput output = reg.getInstance().getOutput();
-
-        reg.close();
-
-        return RpcResultBuilder.<UnsubscribeYnlOutput>success().withResult(output).buildFuture();
+        try (var reg = ynlRegistrations.remove(input.getId())) {
+            return RpcResultBuilder.<UnsubscribeYnlOutput>success()
+                .withResult(reg.getInstance().getOutput())
+                .buildFuture();
+        }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
+    private ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
             final CheckPublishNotificationsInput input) {
         LOG.info("In checkPublishNotifications - input: {}", input);
 
         final PublishNotificationsTask task = publishNotificationsTasks.get(input.getId());
 
         if (task == null) {
-            return Futures.immediateFuture(RpcResultBuilder.success(
-                    new CheckPublishNotificationsOutputBuilder().setActive(false)).build());
+            return RpcResultBuilder.success(new CheckPublishNotificationsOutputBuilder().setActive(false).build())
+                .buildFuture();
         }
 
         final CheckPublishNotificationsOutputBuilder checkPublishNotificationsOutputBuilder =
@@ -482,15 +535,15 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(output).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
+    private ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
             final ShutdownShardReplicaInput input) {
         LOG.info("In shutdownShardReplica - input: {}", input);
 
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
-            return RpcResultBuilder.<ShutdownShardReplicaOutput>failed().withError(ErrorType.RPC, "bad-element",
-                shardName + "is not a valid shard name").buildFuture();
+            return RpcResultBuilder.<ShutdownShardReplicaOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.BAD_ELEMENT, shardName + " is not a valid shard name")
+                .buildFuture();
         }
 
         return shutdownShardGracefully(shardName, new ShutdownShardReplicaOutputBuilder().build());
@@ -503,7 +556,7 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         long timeoutInMS = Math.max(context.getDatastoreContext().getShardRaftConfig()
                 .getElectionTimeOutInterval().$times(3).toMillis(), 10000);
         final FiniteDuration duration = FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS);
-        final scala.concurrent.Promise<Boolean> shutdownShardAsk = akka.dispatch.Futures.promise();
+        final scala.concurrent.Promise<Boolean> shutdownShardAsk = Futures.promise();
 
         context.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
             @Override
@@ -533,33 +586,32 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return rpcResult;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
+    private ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
         LOG.info("In registerConstant - input: {}", input);
 
         if (input.getConstant() == null) {
-            return RpcResultBuilder.<RegisterConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+            return RpcResultBuilder.<RegisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+                .buildFuture();
         }
 
         if (globalGetConstantRegistration != null) {
-            return RpcResultBuilder.<RegisterConstantOutput>failed().withError(ErrorType.RPC,
-                    "data-exists", "There is already an rpc registered").buildFuture();
+            return RpcResultBuilder.<RegisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+                .buildFuture();
         }
 
         globalGetConstantRegistration = GetConstantService.registerNew(domRpcService, input.getConstant());
         return RpcResultBuilder.success(new RegisterConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
+    private ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
             final UnregisterDefaultConstantInput input) {
         throw new UnsupportedOperationException();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
+    private ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
         throw new UnsupportedOperationException();
     }
 }
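
The hunks above replace free-form string error tags ("data-exists", "data-missing", "invalid-value", ...) with the ErrorTag constants from org.opendaylight.yangtools.yang.common. A minimal sketch of that pattern in isolation, using only the RpcResultBuilder calls visible above; RpcErrors is a hypothetical helper for illustration and is not part of this commit:

// Illustration only: a hypothetical helper condensing the ErrorTag-based failure pattern used above.
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.yangtools.yang.common.ErrorTag;
import org.opendaylight.yangtools.yang.common.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

final class RpcErrors {
    private RpcErrors() {
        // static helpers only
    }

    // Returns an already-completed failed RPC future, e.g.
    // RpcErrors.<UnregisterConstantOutput>failed(ErrorTag.DATA_MISSING, "No prior RPC was registered").
    static <T> ListenableFuture<RpcResult<T>> failed(final ErrorTag tag, final String message) {
        return RpcResultBuilder.<T>failed().withError(ErrorType.RPC, tag, message).buildFuture();
    }
}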
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java
deleted file mode 100644 (file)
index 055dc6a..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.mdsal.binding.api.NotificationPublishService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
-    private static final Logger LOG = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
-    private final NotificationPublishService notificationProvider;
-
-    public PurchaseCarProvider(final NotificationPublishService notificationProvider) {
-        this.notificationProvider = requireNonNull(notificationProvider);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<BuyCarOutput>> buyCar(final BuyCarInput input) {
-        LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
-
-        return Futures.transform(notificationProvider.offerNotification(new CarBoughtBuilder()
-            .setCarId(input.getCarId())
-            .setPersonId(input.getPersonId())
-            .build()),
-            result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
-            MoreExecutors.directExecutor());
-    }
-
-    @Override
-    public void close() {
-
-    }
-}
index 90ce618f3d14a403146e44a1c4ed07401eea2f17..afd2d3d0ecacefebffeadd90839c7de60484b439 100644 (file)
@@ -5,37 +5,35 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider.impl;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class FlappingSingletonService implements ClusterSingletonService {
-
+public final class FlappingSingletonService implements ClusterSingletonService {
     private static final Logger LOG = LoggerFactory.getLogger(FlappingSingletonService.class);
-
     private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
-            ServiceGroupIdentifier.create("flapping-singleton-service");
+            new ServiceGroupIdentifier("flapping-singleton-service");
 
     private final ClusterSingletonServiceProvider singletonServiceProvider;
     private final AtomicBoolean active = new AtomicBoolean(true);
-
     private final AtomicLong flapCount = new AtomicLong();
-    private volatile ClusterSingletonServiceRegistration registration;
+
+    private volatile Registration registration;
 
     public FlappingSingletonService(final ClusterSingletonServiceProvider singletonServiceProvider) {
         LOG.debug("Registering flapping-singleton-service.");
-
-        this.singletonServiceProvider = singletonServiceProvider;
+        this.singletonServiceProvider = requireNonNull(singletonServiceProvider);
         registration = singletonServiceProvider.registerClusterSingletonService(this);
     }
 
index d6c39f1996f4200e3f83145ba593c0a3f74ffee6..54320705ac1c9ea68640703a2472ed75814c11d4 100644 (file)
@@ -11,28 +11,24 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.common.XMLNamespace;
 import org.opendaylight.yangtools.yang.common.YangConstants;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class GetConstantService implements DOMRpcImplementation {
     private static final Logger LOG = LoggerFactory.getLogger(GetConstantService.class);
 
-    private static final QNameModule MODULE = QNameModule.create(
-        XMLNamespace.of("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15"))
-        .intern();
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
 
     private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
     private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
@@ -44,23 +40,19 @@ public final class GetConstantService implements DOMRpcImplementation {
         this.constant = constant;
     }
 
-    public static DOMRpcImplementationRegistration<GetConstantService> registerNew(
-            final DOMRpcProviderService rpcProviderService, final String constant) {
+    public static Registration registerNew(final DOMRpcProviderService rpcProviderService, final String constant) {
         LOG.debug("Registering get-constant service, constant value: {}", constant);
         return rpcProviderService.registerRpcImplementation(new GetConstantService(constant),
             DOMRpcIdentifier.create(GET_CONSTANT));
     }
 
     @Override
-    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-constant invoked, current value: {}", constant);
 
-        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
             .withNodeIdentifier(new NodeIdentifier(OUTPUT))
-            .withChild(ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build())
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
             .build()));
     }
 }
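
GetConstantService, and the other RPC implementations below, move from the per-node ImmutableContainerNodeBuilder/ImmutableLeafNodeBuilder classes to the single org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes entry point. A self-contained sketch of that output construction, reusing only calls shown above; ConstantOutput is a hypothetical holder class, not part of this commit:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.QNameModule;
import org.opendaylight.yangtools.yang.common.YangConstants;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

final class ConstantOutput {
    private static final QNameModule MODULE =
        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
    private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
    private static final QName CONSTANT = QName.create(MODULE, "constant").intern();

    private ConstantOutput() {
    }

    // Builds the same shape of RPC output as GetConstantService above, using the spi ImmutableNodes builders.
    static ContainerNode of(final String constant) {
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(OUTPUT))
            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
            .build();
    }
}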
index e9055456497ead0b6a4ff1e9233ca85e20983870..70f6f7811421164534b6b0b3d78728d97786f2b1 100644 (file)
@@ -12,33 +12,33 @@ import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTr
 
 import com.google.common.util.concurrent.SettableFuture;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.List;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
+public final class IdIntsListener implements DOMDataTreeChangeListener {
     private static final Logger LOG = LoggerFactory.getLogger(IdIntsListener.class);
     private static final long SECOND_AS_NANO = 1000000000;
 
-    private volatile NormalizedNode localCopy;
     private final AtomicLong lastNotifTimestamp = new AtomicLong(0);
-    private ScheduledExecutorService executorService;
-    private ScheduledFuture<?> scheduledFuture;
+    private ScheduledExecutorService executorService = null;
+    private ScheduledFuture<?> scheduledFuture = null;
+
+    private volatile NormalizedNode localCopy;
 
     @Override
     public void onInitialData() {
@@ -46,7 +46,7 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
     }
 
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
 
         // There should only be one candidate reported
         checkState(changes.size() == 1);
@@ -57,13 +57,12 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
         LOG.debug("Received data tree changed");
 
         changes.forEach(change -> {
-            if (change.getRootNode().getDataAfter().isPresent()) {
-                LOG.trace("Received change, data before: {}, data after: {}",
-                        change.getRootNode().getDataBefore().isPresent()
-                                ? change.getRootNode().getDataBefore().get() : "",
-                        change.getRootNode().getDataAfter().get());
-
-                localCopy = change.getRootNode().getDataAfter().get();
+            final var root = change.getRootNode();
+            final var after = root.dataAfter();
+            if (after != null) {
+                final var before = root.dataBefore();
+                LOG.trace("Received change, data before: {}, data after: {}", before != null ? before : "", after);
+                localCopy = after;
             } else {
                 LOG.warn("getDataAfter() is missing from notification. change: {}", change);
             }
@@ -85,7 +84,7 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
 
     public Future<Void> tryFinishProcessing() {
         executorService = Executors.newSingleThreadScheduledExecutor();
-        final SettableFuture<Void> settableFuture = SettableFuture.create();
+        final var settableFuture = SettableFuture.<Void>create();
 
         scheduledFuture = executorService.scheduleAtFixedRate(new CheckFinishedTask(settableFuture),
                 0, 1, TimeUnit.SECONDS);
@@ -95,43 +94,42 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
     public static String diffNodes(final MapNode expected, final MapNode actual) {
         StringBuilder builder = new StringBuilder("MapNodes diff:");
 
-        final YangInstanceIdentifier.NodeIdentifier itemNodeId = new YangInstanceIdentifier.NodeIdentifier(ITEM);
+        final var itemNodeId = new NodeIdentifier(ITEM);
 
-        Map<NodeIdentifierWithPredicates, MapEntryNode> expIdIntMap = new HashMap<>();
-        expected.body().forEach(node -> expIdIntMap.put(node.getIdentifier(), node));
+        final var expIdIntMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+        expected.body().forEach(node -> expIdIntMap.put(node.name(), node));
 
         actual.body().forEach(actIdInt -> {
-            final MapEntryNode expIdInt = expIdIntMap.remove(actIdInt.getIdentifier());
+            final var expIdInt = expIdIntMap.remove(actIdInt.name());
             if (expIdInt == null) {
-                builder.append('\n').append("  Unexpected id-int entry for ").append(actIdInt.getIdentifier());
+                builder.append('\n').append("  Unexpected id-int entry for ").append(actIdInt.name());
                 return;
             }
 
-            Map<NodeIdentifierWithPredicates, MapEntryNode> expItemMap = new HashMap<>();
-            ((MapNode)expIdInt.findChildByArg(itemNodeId).get()).body()
-                .forEach(node -> expItemMap.put(node.getIdentifier(), node));
+            final var expItemMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+            ((MapNode)expIdInt.getChildByArg(itemNodeId)).body()
+                .forEach(node -> expItemMap.put(node.name(), node));
 
-            ((MapNode)actIdInt.findChildByArg(itemNodeId).get()).body().forEach(actItem -> {
-                final MapEntryNode expItem = expItemMap.remove(actItem.getIdentifier());
+            ((MapNode)actIdInt.getChildByArg(itemNodeId)).body().forEach(actItem -> {
+                final var expItem = expItemMap.remove(actItem.name());
                 if (expItem == null) {
-                    builder.append('\n').append("  Unexpected item entry ").append(actItem.getIdentifier())
-                        .append(" for id-int entry ").append(actIdInt.getIdentifier());
+                    builder.append('\n').append("  Unexpected item entry ").append(actItem.name())
+                        .append(" for id-int entry ").append(actIdInt.name());
                 }
             });
 
             expItemMap.values().forEach(node -> builder.append('\n')
-                .append("  Actual is missing item entry ").append(node.getIdentifier())
-                    .append(" for id-int entry ").append(actIdInt.getIdentifier()));
+                .append("  Actual is missing item entry ").append(node.name())
+                    .append(" for id-int entry ").append(actIdInt.name()));
         });
 
         expIdIntMap.values().forEach(node -> builder.append('\n')
-            .append("  Actual is missing id-int entry for ").append(node.getIdentifier()));
+            .append("  Actual is missing id-int entry for ").append(node.name()));
 
         return builder.toString();
     }
 
-    private class CheckFinishedTask implements Runnable {
-
+    private final class CheckFinishedTask implements Runnable {
         private final SettableFuture<Void> future;
 
         CheckFinishedTask(final SettableFuture<Void> future) {
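
IdIntsListener now implements the plain DOMDataTreeChangeListener, receives a List of candidates, and reads the nullable dataBefore()/dataAfter() accessors instead of the Optional-returning getters. A hypothetical minimal listener showing just that shape (not part of this commit):

import java.util.List;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;

final class LastValueListener implements DOMDataTreeChangeListener {
    private volatile NormalizedNode lastSeen;

    @Override
    public void onInitialData() {
        // No pre-existing data: nothing to record.
    }

    @Override
    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
        for (var change : changes) {
            // dataAfter() is null when the subtree was deleted, so keep only real replacements.
            final var after = change.getRootNode().dataAfter();
            if (after != null) {
                lastSeen = after;
            }
        }
    }

    NormalizedNode lastSeen() {
        return lastSeen;
    }
}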
index 79250b6dd08056ea959de75104459b0e9695b21e..043325fea285b2bbbf78db62c98e4545862683e9 100644 (file)
@@ -20,10 +20,9 @@ import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.l
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PublishNotificationsTask implements Runnable {
-
+public final class PublishNotificationsTask implements Runnable {
     private static final Logger LOG = LoggerFactory.getLogger(PublishNotificationsTask.class);
-    private static final int SECOND_AS_NANO = 1000000000;
+    private static final int SECOND_AS_NANO = 1_000_000_000;
 
     private final NotificationPublishService notificationPublishService;
     private final String notificationId;
@@ -44,9 +43,9 @@ public class PublishNotificationsTask implements Runnable {
         this.notificationPublishService = requireNonNull(notificationPublishService);
         this.notificationId = requireNonNull(notificationId);
         checkArgument(secondsToTake > 0);
-        this.timeToTake = secondsToTake * SECOND_AS_NANO;
+        timeToTake = secondsToTake * SECOND_AS_NANO;
         checkArgument(maxPerSecond > 0);
-        this.delay = SECOND_AS_NANO / maxPerSecond;
+        delay = SECOND_AS_NANO / maxPerSecond;
 
         LOG.debug("Delay : {}", delay);
     }
index 72888cd10daa625f2b9f3b814967354738435748..e3c1b20ff38ac19d641ec06327639718d6950a95 100644 (file)
@@ -12,30 +12,25 @@ import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.common.XMLNamespace;
 import org.opendaylight.yangtools.yang.common.YangConstants;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class RoutedGetConstantService implements DOMRpcImplementation {
     private static final Logger LOG = LoggerFactory.getLogger(RoutedGetConstantService.class);
 
-    private static final QNameModule MODULE = QNameModule.create(
-        XMLNamespace.of("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15"))
-        .intern();
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
     private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
     private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
     private static final QName GET_CONTEXTED_CONSTANT = QName.create(MODULE, "get-contexted-constant").intern();
@@ -46,28 +41,25 @@ public final class RoutedGetConstantService implements DOMRpcImplementation {
         this.constant = constant;
     }
 
-    public static DOMRpcImplementationRegistration<RoutedGetConstantService> registerNew(
-            final BindingNormalizedNodeSerializer codec, final DOMRpcProviderService rpcProviderService,
-            final String constant, final InstanceIdentifier<?> context) {
+    public static Registration registerNew(final BindingNormalizedNodeSerializer codec,
+            final DOMRpcProviderService rpcProviderService, final String constant,
+            final InstanceIdentifier<?> context) {
 
         LOG.debug("Registering get-contexted-constant on context: {}, with value: {}", context, constant);
 
-        final YangInstanceIdentifier yid = codec.toYangInstanceIdentifier(context);
-        final DOMRpcIdentifier id = DOMRpcIdentifier.create(GET_CONTEXTED_CONSTANT, yid);
+        final var yid = codec.toYangInstanceIdentifier(context);
+        final var id = DOMRpcIdentifier.create(GET_CONTEXTED_CONSTANT, yid);
 
         return rpcProviderService.registerRpcImplementation(new RoutedGetConstantService(constant), id);
     }
 
     @Override
-    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-contexted-constant invoked, current value: {}", constant);
 
-        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
             .withNodeIdentifier(new NodeIdentifier(OUTPUT))
-            .withChild(ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build())
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
             .build()));
     }
 }
index 31d002268c1297944ebc7d610e8e78bc17d83e28..9177cc6ec7a2370de82887ca5fd7dc5fd9002573 100644 (file)
@@ -11,53 +11,47 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.common.XMLNamespace;
 import org.opendaylight.yangtools.yang.common.YangConstants;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class SingletonGetConstantService implements DOMRpcImplementation, ClusterSingletonService {
-
     private static final Logger LOG = LoggerFactory.getLogger(SingletonGetConstantService.class);
 
-    private static final QNameModule MODULE = QNameModule.create(
-        XMLNamespace.of("tag:opendaylight.org,2017:controller:yang:lowlevel:target"), Revision.of("2017-02-15"))
-        .intern();
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
     private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
     private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
     private static final QName CONTEXT = QName.create(MODULE, "context").intern();
     private static final QName GET_SINGLETON_CONSTANT = QName.create(MODULE, "get-singleton-constant").intern();
 
     private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
-            ServiceGroupIdentifier.create("get-singleton-constant-service");
+        new ServiceGroupIdentifier("get-singleton-constant-service");
 
     private final DOMRpcProviderService rpcProviderService;
     private final String constant;
-    private DOMRpcImplementationRegistration<SingletonGetConstantService> rpcRegistration;
+
+    private Registration rpcRegistration = null;
 
     private SingletonGetConstantService(final DOMRpcProviderService rpcProviderService, final String constant) {
         this.rpcProviderService = rpcProviderService;
         this.constant = constant;
     }
 
-    public static ClusterSingletonServiceRegistration registerNew(
-            final ClusterSingletonServiceProvider singletonService, final DOMRpcProviderService rpcProviderService,
-            final String constant) {
+    public static Registration registerNew(final ClusterSingletonServiceProvider singletonService,
+            final DOMRpcProviderService rpcProviderService, final String constant) {
         LOG.debug("Registering get-singleton-constant into ClusterSingletonService, value {}", constant);
 
         return singletonService.registerClusterSingletonService(
@@ -65,15 +59,12 @@ public final class SingletonGetConstantService implements DOMRpcImplementation,
     }
 
     @Override
-    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final NormalizedNode input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-singleton-constant invoked, current value: {}", constant);
 
-        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableContainerNodeBuilder.create()
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
             .withNodeIdentifier(new NodeIdentifier(OUTPUT))
-            .withChild(ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build())
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
             .build()));
     }
 
index 6354f8497f0e6be7170472ca606c1c30f8287dc6..97ca77944f2334e636ff28ff409d37305cb641bf 100644 (file)
@@ -8,8 +8,10 @@
 package org.opendaylight.controller.clustering.it.provider.impl;
 
 import static java.util.Objects.requireNonNull;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 
 import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.LinkedHashSet;
@@ -25,14 +27,13 @@ import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -40,22 +41,20 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public abstract class WriteTransactionsHandler extends AbstractTransactionHandler {
-    private static final class Chained extends WriteTransactionsHandler implements DOMTransactionChainListener {
+    private static final class Chained extends WriteTransactionsHandler implements FutureCallback<Empty> {
         private final SplittableRandom random = new SplittableRandom();
         private final DOMTransactionChain transactionChain;
 
         Chained(final DOMDataBroker dataBroker, final YangInstanceIdentifier idListItem,
             final WriteTransactionsInput input) {
             super(idListItem, input);
-            transactionChain = dataBroker.createTransactionChain(this);
+            transactionChain = dataBroker.createTransactionChain();
+            transactionChain.addCallback(this);
         }
 
         @Override
@@ -69,15 +68,14 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         }
 
         @Override
-        public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-                final Throwable cause) {
+        public void onFailure(final Throwable cause) {
             // This is expected to happen frequently in isolation testing.
             LOG.debug("Transaction chain failed.", cause);
             // Do not return RPC here, rely on transaction failure to call runFailed.
         }
 
         @Override
-        public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
+        public void onSuccess(final Empty result) {
             LOG.debug("Transaction chain closed successfully.");
         }
     }
@@ -133,14 +131,18 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         LOG.info("Starting write transactions with input {}", input);
 
         final String id = input.getId();
-        final MapEntryNode entry = ImmutableNodes.mapEntryBuilder(ID_INT, ID, id)
-                .withChild(ImmutableNodes.mapNodeBuilder(ITEM).build())
+        final MapEntryNode entry = mapEntryBuilder(ID_INT, ID, id)
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(ITEM))
+                    .build())
                 .build();
-        final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.getIdentifier());
+        final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.name());
 
-        final ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
+        final ContainerNode containerNode = ImmutableNodes.newContainerBuilder()
                 .withNodeIdentifier(new NodeIdentifier(ID_INTS))
-                .withChild(ImmutableNodes.mapNodeBuilder(ID_INT).build())
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(ID_INT))
+                    .build())
                 .build();
 
         DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
@@ -150,7 +152,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
             tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
         } catch (InterruptedException | TimeoutException e) {
             LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error writing top-level path %s:  %s",
                     ID_INTS_YID, containerNode), e).buildFuture();
         } catch (ExecutionException e) {
@@ -161,7 +163,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
                 LOG.debug("Got an optimistic lock when writing initial top level list element.", e);
             } else {
                 LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
-                return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+                return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                     String.format("Could not start write transactions - error writing top-level path %s:  %s",
                         ID_INTS_YID, containerNode), e).buildFuture();
             }
@@ -174,25 +176,25 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
             tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
         } catch (InterruptedException | ExecutionException | TimeoutException e) {
             LOG.error("Error writing top-level path {}: {}", idListItem, entry, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error writing list entry path %s: %s",
                     idListItem, entry), e).buildFuture();
         }
 
         LOG.debug("Filling the item list with initial values.");
 
-        final CollectionNodeBuilder<MapEntryNode, SystemMapNode> mapBuilder = ImmutableNodes.mapNodeBuilder(ITEM);
-
         final YangInstanceIdentifier itemListId = idListItem.node(ITEM);
         tx = domDataBroker.newWriteOnlyTransaction();
-        final MapNode itemListNode = mapBuilder.build();
+        final MapNode itemListNode = ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(ITEM))
+            .build();
         tx.put(LogicalDatastoreType.CONFIGURATION, itemListId, itemListNode);
 
         try {
             tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
         } catch (InterruptedException | ExecutionException | TimeoutException e) {
             LOG.error("Error filling initial item list path {}: {}", itemListId, itemListNode, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error filling initial item list path %s: %s",
                     itemListId, itemListNode), e).buildFuture();
         }
@@ -228,8 +230,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         } else {
             LOG.debug("Inserting item: {}", i);
             insertTx.incrementAndGet();
-            final MapEntryNode entry = ImmutableNodes.mapEntry(ITEM, NUMBER, i);
-            tx.put(LogicalDatastoreType.CONFIGURATION, entryId, entry);
+            tx.put(LogicalDatastoreType.CONFIGURATION, entryId, mapEntryBuilder(ITEM, NUMBER, i).build());
             usedValues.add(i);
         }
 
@@ -239,7 +240,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
     @Override
     void runFailed(final Throwable cause, final long txId) {
         completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
+            .withError(ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
     }
 
     @Override
@@ -257,7 +258,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
     @Override
     void runTimedOut(final String cause) {
         completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, cause).build());
+            .withError(ErrorType.APPLICATION, cause).build());
     }
 
     abstract DOMDataTreeWriteTransaction createTransaction();
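
WriteTransactionsHandler.Chained replaces DOMTransactionChainListener with createTransactionChain() plus an addCallback(FutureCallback<Empty>) hook. A hypothetical sketch of that wiring in isolation; ChainMonitor is not part of this commit:

import com.google.common.util.concurrent.FutureCallback;
import org.opendaylight.mdsal.dom.api.DOMDataBroker;
import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
import org.opendaylight.yangtools.yang.common.Empty;

final class ChainMonitor {
    private ChainMonitor() {
    }

    // createTransactionChain() no longer takes a listener; chain completion and failure
    // are observed through the callback registered afterwards, as Chained does above.
    static DOMTransactionChain newMonitoredChain(final DOMDataBroker dataBroker) {
        final DOMTransactionChain chain = dataBroker.createTransactionChain();
        chain.addCallback(new FutureCallback<Empty>() {
            @Override
            public void onSuccess(final Empty result) {
                // Chain closed cleanly.
            }

            @Override
            public void onFailure(final Throwable cause) {
                // A transaction in the chain failed; the chain is no longer usable.
            }
        });
        return chain;
    }
}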
index 331ee91850dc69d3aa4aa98ccda17f7d42403fa9..4dd650ec40e6532f678b29c499a075c8c68d50af 100644 (file)
@@ -10,14 +10,14 @@ package org.opendaylight.controller.clustering.it.provider.impl;
 import static java.util.Objects.requireNonNull;
 
 import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutputBuilder;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.OdlMdsalLowlevelTargetListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class YnlListener implements OdlMdsalLowlevelTargetListener {
+public class YnlListener implements Listener<IdSequence> {
     private static final Logger LOG = LoggerFactory.getLogger(YnlListener.class);
 
     private final String id;
@@ -32,7 +32,7 @@ public class YnlListener implements OdlMdsalLowlevelTargetListener {
     }
 
     @Override
-    public void onIdSequence(final IdSequence notification) {
+    public void onNotification(final IdSequence notification) {
         LOG.debug("Received id-sequence notification, : {}", notification);
 
         allNot.incrementAndGet();
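
The YnlListener change above swaps the generated OdlMdsalLowlevelTargetListener for the generic NotificationService.Listener<IdSequence>. A minimal wiring sketch follows; the registerListener call, the Registration handle and the constructor argument are assumptions based on the mdsal NotificationService API and are not part of this patch:

    // Hypothetical registration of the single-type listener shown above.
    // 'notificationService' stands for an injected org.opendaylight.mdsal.binding.api.NotificationService.
    final YnlListener listener = new YnlListener("ynl-1");
    final Registration reg = notificationService.registerListener(IdSequence.class, listener);
    // ... once id-sequence notifications are no longer needed:
    reg.close();
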
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml
deleted file mode 100644
index aa7d403..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:use-default-for-reference-types="true">
-
-  <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
-  <reference id="entityOwnershipService" interface="org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService"/>
-  <reference id="bindingRpcRegistry" interface="org.opendaylight.mdsal.binding.api.RpcProviderService"/>
-  <reference id="domRpcProviderService" interface="org.opendaylight.mdsal.dom.api.DOMRpcProviderService"/>
-  <reference id="clusterSingletonService" interface="org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider"/>
-  <reference id="domDataBroker" interface="org.opendaylight.mdsal.dom.api.DOMDataBroker"/>
-  <reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService"/>
-  <reference id="normalizedNodeSerializer" interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"/>
-  <reference id="notificationPublishService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService" />
-  <reference id="notificationListenerService" interface="org.opendaylight.mdsal.binding.api.NotificationService" />
-  <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
-             odl:type="distributed-config"/>
-  <reference id="actorSystemProvider" interface="org.opendaylight.controller.cluster.ActorSystemProvider"/>
-
-
-  <bean id="purchaseCarProvider" class="org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider" >
-    <argument ref="notificationPublishService"/>
-  </bean>
-
-  <bean id="peopleProvider" class="org.opendaylight.controller.clustering.it.provider.PeopleProvider"
-        destroy-method="close">
-    <argument ref="dataBroker"/>
-    <argument ref="bindingRpcRegistry"/>
-    <argument ref="purchaseCarProvider"/>
-  </bean>
-
-  <bean id="carProvider" class="org.opendaylight.controller.clustering.it.provider.CarProvider"
-        destroy-method="close">
-    <argument ref="dataBroker"/>
-    <argument ref="entityOwnershipService"/>
-    <argument ref="domDataBroker"/>
-  </bean>
-
-  <odl:rpc-implementation ref="carProvider"/>
-  <odl:rpc-implementation ref="peopleProvider"/>
-
-  <bean id="peopleCarListener" class="org.opendaylight.controller.clustering.it.listener.PeopleCarListener" >
-    <property name="dataProvider" ref="dataBroker"/>
-  </bean>
-
-  <odl:notification-listener ref="peopleCarListener"/>
-
-  <bean id="basicTestProvider" class="org.opendaylight.controller.clustering.it.provider.BasicRpcTestProvider">
-    <argument ref="bindingRpcRegistry"/>
-    <argument ref="clusterSingletonService"/>
-  </bean>
-
-  <bean id="lowLevelTestProvider" class="org.opendaylight.controller.clustering.it.provider.MdsalLowLevelTestProvider">
-    <argument ref="bindingRpcRegistry"/>
-    <argument ref="domRpcProviderService"/>
-    <argument ref="clusterSingletonService"/>
-    <argument ref="schemaService"/>
-    <argument ref="normalizedNodeSerializer"/>
-    <argument ref="notificationPublishService"/>
-    <argument ref="notificationListenerService"/>
-    <argument ref="domDataBroker"/>
-    <argument ref="configDatastore"/>
-    <argument ref="actorSystemProvider"/>
-  </bean>
-
-</blueprint>
index fba639d37c73f19f9d4a65da0427ffd4a8bc13f5..80752933e0502c991609d3a35a1b943f1aafa905 100644
@@ -4,13 +4,13 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller.samples</groupId>
   <artifactId>samples-aggregator</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
index b100353fe673fd787538933228ef97adc9586629..99d9c76c55ed7979a0b1bb9597445054dbd8d953 100644
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-common-util</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>yang-binding</artifactId>
     </dependency>
+    <dependency>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
+    </dependency>
   </dependencies>
 
   <scm>
index b4c621797987a5e14da6b24cc238185f708d355f..e3034dd4828b29adb31ab47bf0a1f8de9044f253 100644
@@ -5,13 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.sample.kitchen.api;
 
-import java.util.concurrent.Future;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 public interface KitchenService {
-    Future<RpcResult<Void>> makeBreakfast(EggsType eggs, Class<? extends ToastType> toast, int toastDoneness);
+
+    ListenableFuture<RpcResult<Void>> makeBreakfast(EggsType eggs, ToastType toast, int toastDoneness);
 }
index 306efc5e63dd613b335f935ca168f4bd8a1fd0fa..b67de11367874fd0e3aeb13645fd602fc7ac7a7c 100644
@@ -11,53 +11,83 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.sample.kitchen.api.EggsType;
 import org.opendaylight.controller.sample.kitchen.api.KitchenService;
 import org.opendaylight.controller.sample.kitchen.api.KitchenServiceRuntimeMXBean;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.CompositeListener;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterListener;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBread;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WheatBread;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class KitchenServiceImpl extends AbstractMXBean
-        implements KitchenService, KitchenServiceRuntimeMXBean, ToasterListener {
-
+@Singleton
+@Component(service = KitchenService.class, immediate = true)
+public final class KitchenServiceImpl extends AbstractMXBean implements KitchenService, KitchenServiceRuntimeMXBean {
     private static final Logger LOG = LoggerFactory.getLogger(KitchenServiceImpl.class);
     private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
 
-    private final ToasterService toaster;
-
-    private final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
+    private final ExecutorService executor = Executors.newCachedThreadPool();
+    private final MakeToast makeToast;
+    private final Registration reg;
 
     private volatile boolean toasterOutOfBread;
 
-    public KitchenServiceImpl(final ToasterService toaster) {
+    @Inject
+    @Activate
+    public KitchenServiceImpl(@Reference final RpcService rpcService,
+            @Reference final NotificationService notifService) {
         super("KitchenService", "toaster-consumer", null);
-        this.toaster = toaster;
+        makeToast = rpcService.getRpc(MakeToast.class);
+        reg = notifService.registerCompositeListener(new CompositeListener(Set.of(
+            new CompositeListener.Component<>(ToasterOutOfBread.class, notification -> {
+                LOG.info("ToasterOutOfBread notification");
+                toasterOutOfBread = true;
+            }),
+            new CompositeListener.Component<>(ToasterRestocked.class, notification -> {
+                LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
+                toasterOutOfBread = false;
+            }))));
+        register();
+    }
+
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        unregister();
+        reg.close();
     }
 
     @Override
-    public Future<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final Class<? extends ToastType> toastType,
+    public ListenableFuture<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final ToastType toastType,
             final int toastDoneness) {
         // Call makeToast, The OpendaylightToaster impl already returns a ListenableFuture so the conversion is
         // actually a no-op.
@@ -86,38 +116,35 @@ public class KitchenServiceImpl extends AbstractMXBean
                 }
             }
 
-            return Futures.immediateFuture(RpcResultBuilder.<Void>status(atLeastOneSucceeded)
-                    .withRpcErrors(errorList.build()).build());
+            return RpcResultBuilder.<Void>status(atLeastOneSucceeded).withRpcErrors(errorList.build()).buildFuture();
         }, MoreExecutors.directExecutor());
     }
 
     private ListenableFuture<RpcResult<Void>> makeEggs(final EggsType eggsType) {
-        return executor.submit(() -> RpcResultBuilder.<Void>success().build());
+        return Futures.submit(() -> RpcResultBuilder.<Void>success().build(), executor);
     }
 
-    private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final Class<? extends ToastType> toastType,
-            final int toastDoneness) {
-
+    private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final ToastType toastType, final int toastDoneness) {
         if (toasterOutOfBread) {
             LOG.info("We're out of toast but we can make eggs");
-            return Futures.immediateFuture(RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
-                .withWarning(ErrorType.APPLICATION, "partial-operation",
-                    "Toaster is out of bread but we can make you eggs").build());
+            return RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
+                .withWarning(ErrorType.APPLICATION, ErrorTag.PARTIAL_OPERATION,
+                    "Toaster is out of bread but we can make you eggs")
+                .buildFuture();
         }
 
         // Access the ToasterService to make the toast.
-
-        MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(Uint32.valueOf(toastDoneness))
-                .setToasterToastType(toastType).build();
-
-        return toaster.makeToast(toastInput);
+        return makeToast.invoke(new MakeToastInputBuilder()
+            .setToasterDoneness(Uint32.valueOf(toastDoneness))
+            .setToasterToastType(toastType)
+            .build());
     }
 
     @Override
     public Boolean makeScrambledWithWheat() {
         try {
             // This call has to block since we must return a result to the JMX client.
-            RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.class, 2).get();
+            RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.VALUE, 2).get();
             if (result.isSuccessful()) {
                 LOG.info("makeBreakfast succeeded");
             } else {
@@ -131,22 +158,4 @@ public class KitchenServiceImpl extends AbstractMXBean
 
         return Boolean.FALSE;
     }
-
-    /**
-     * Implemented from the ToasterListener interface.
-     */
-    @Override
-    public void onToasterOutOfBread(final ToasterOutOfBread notification) {
-        LOG.info("ToasterOutOfBread notification");
-        toasterOutOfBread = true;
-    }
-
-    /**
-     * Implemented from the ToasterListener interface.
-     */
-    @Override
-    public void onToasterRestocked(final ToasterRestocked notification) {
-        LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
-        toasterOutOfBread = false;
-    }
 }
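
The KitchenServiceImpl rewrite above illustrates the two consumer-side replacements for the generated ToasterService/ToasterListener interfaces: per-RPC lookup through RpcService and notification handling through a CompositeListener. A condensed sketch of the same calls, with illustrative variable names:

    // RPC consumption: resolve the MakeToast RPC once, then invoke it with a built input.
    final MakeToast makeToast = rpcService.getRpc(MakeToast.class);
    final ListenableFuture<RpcResult<MakeToastOutput>> toast = makeToast.invoke(new MakeToastInputBuilder()
        .setToasterDoneness(Uint32.valueOf(2))
        .setToasterToastType(WheatBread.VALUE)
        .build());

    // Notification consumption: one Registration covering both toaster notification types.
    final Registration reg = notifService.registerCompositeListener(new CompositeListener(Set.of(
        new CompositeListener.Component<>(ToasterOutOfBread.class, notification -> LOG.info("out of bread")),
        new CompositeListener.Component<>(ToasterRestocked.class, notification -> LOG.info("restocked")))));
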
diff --git a/opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml b/opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml
deleted file mode 100644
index 16e8f98..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-    odl:use-default-for-reference-types="true">
-
-  <!-- Retrieves the RPC service for the ToasterService interface -->
-  <odl:rpc-service id="toasterService" interface="org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService"/>
-
-  <!-- Create the KitchenServiceImpl instance and inject the RPC service identified by "toasterService" -->
-  <bean id="kitchenService" class="org.opendaylight.controller.sample.kitchen.impl.KitchenServiceImpl"
-          init-method="register" destroy-method="unregister">
-    <argument ref="toasterService"/>
-  </bean>
-
-  <!-- Register the KitchenServiceImpl to receive yang notifications -->
-  <odl:notification-listener ref="kitchenService"/>
-
-  <!-- Advertise the KitchenServiceImpl with the OSGi registry with the type property set to "default" . The
-       type property is optional but can be used to distinguish this implementation from any other potential
-       KitchenService implementations (if there were any). Clients consuming the KitchenService can pick the
-       desired implementation via the particular type.
-  -->
-  <service ref="kitchenService" interface="org.opendaylight.controller.sample.kitchen.api.KitchenService"
-          odl:type="default"/>
-</blueprint>
index 60ebe3810377f7eb79ee8c53327023872284c8a0..6b8c35e645eea37d816bbb433c5b534910c34d88 100644
@@ -5,7 +5,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-it-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../mdsal-it-parent</relativePath>
   </parent>
   <artifactId>sample-toaster-it</artifactId>
index ed2e47b0619c240e1e1115ebb4cb5577dc8ef4ba..0b4ea8e72a13cfabf78bd75fe47f304925de5f94 100644
@@ -21,6 +21,7 @@ import org.opendaylight.controller.mdsal.it.base.AbstractMdsalTestBase;
 import org.opendaylight.controller.sample.kitchen.api.EggsType;
 import org.opendaylight.controller.sample.kitchen.api.KitchenService;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.HashBrown;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WhiteBread;
 import org.ops4j.pax.exam.junit.PaxExam;
 import org.ops4j.pax.exam.options.MavenUrlReference;
@@ -31,6 +32,10 @@ public class ToasterTest extends AbstractMdsalTestBase {
     @Inject
     @Filter(timeout = 60 * 1000)
     KitchenService kitchenService;
+    @Inject
+    @Filter(timeout = 60 * 1000)
+    // proxy for the entire toaster, nothing else
+    MakeToast makeToast;
 
     @Override
     public MavenUrlReference getFeatureRepo() {
@@ -55,8 +60,8 @@ public class ToasterTest extends AbstractMdsalTestBase {
         boolean success = true;
 
         // Make toasts using OSGi service
-        success &= kitchenService.makeBreakfast(EggsType.SCRAMBLED, HashBrown.class, 4).get().isSuccessful();
-        success &= kitchenService.makeBreakfast(EggsType.POACHED, WhiteBread.class, 8).get().isSuccessful();
+        success &= kitchenService.makeBreakfast(EggsType.SCRAMBLED, HashBrown.VALUE, 4).get().isSuccessful();
+        success &= kitchenService.makeBreakfast(EggsType.POACHED, WhiteBread.VALUE, 8).get().isSuccessful();
 
         assertTrue("Not all breakfasts succeeded", success);
 
index bec1258650458d842c9b131101a172b0e8377e17..ea6ee620811c1ed0728f95ab99c5eaf2cfd202c3 100644
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-test-utils</artifactId>
     </dependency>
+
+    <dependency>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.metatype.annotations</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <scm>
index 20e544903d6d727e1e532f72a8f62fd2d6cad976..cc275a9b006005bf84ff09c0ca9028b986b0d3c0 100644
@@ -8,11 +8,9 @@
 package org.opendaylight.controller.sample.toaster.provider;
 
 import static java.util.Objects.requireNonNull;
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.DELETE;
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.WRITE;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL;
-import static org.opendaylight.yangtools.yang.common.RpcError.ErrorType.APPLICATION;
+import static org.opendaylight.yangtools.yang.common.ErrorType.APPLICATION;
 
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
@@ -20,34 +18,42 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
+import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
 import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
 import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.ReadWriteTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToaster;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutputBuilder;
@@ -57,22 +63,38 @@ import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBreadBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestockedBuilder;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfigBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.common.Uint16;
-import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class OpendaylightToaster extends AbstractMXBean
-        implements ToasterService, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+@Singleton
+@Component(service = MakeToast.class, immediate = true)
+@Designate(ocd = OpendaylightToaster.Configuration.class)
+public final class OpendaylightToaster extends AbstractMXBean
+        implements MakeToast, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+    @ObjectClassDefinition
+    public @interface Configuration {
+        @AttributeDefinition(description = "The name of the toaster's manufacturer", max = "255")
+        String manufacturer() default TOASTER_MANUFACTURER;
+        @AttributeDefinition(description = "The name of the toaster's model", max = "255")
+        String modelNumber() default TOASTER_MODEL_NUMBER;
+        @AttributeDefinition(description = "How many times we attempt to make toast before failing ",
+            min = "0", max = "65535")
+        int maxMakeToastTries() default 2;
+    }
 
     private static final CancelToastOutput EMPTY_CANCEL_OUTPUT = new CancelToastOutputBuilder().build();
     private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
@@ -81,12 +103,13 @@ public class OpendaylightToaster extends AbstractMXBean
     private static final Logger LOG = LoggerFactory.getLogger(OpendaylightToaster.class);
 
     private static final InstanceIdentifier<Toaster> TOASTER_IID = InstanceIdentifier.builder(Toaster.class).build();
-    private static final DisplayString TOASTER_MANUFACTURER = new DisplayString("Opendaylight");
-    private static final DisplayString TOASTER_MODEL_NUMBER = new DisplayString("Model 1 - Binding Aware");
+    private static final String TOASTER_MANUFACTURER = "Opendaylight";
+    private static final String TOASTER_MODEL_NUMBER = "Model 1 - Binding Aware";
 
-    private DataBroker dataBroker;
-    private NotificationPublishService notificationProvider;
-    private ListenerRegistration<OpendaylightToaster> dataTreeChangeListenerRegistration;
+    private final DataBroker dataBroker;
+    private final NotificationPublishService notificationProvider;
+    private final Registration dataTreeChangeListenerRegistration;
+    private final Registration reg;
 
     private final ExecutorService executor;
 
@@ -98,47 +121,68 @@ public class OpendaylightToaster extends AbstractMXBean
     private final AtomicLong toastsMade = new AtomicLong(0);
     private final AtomicLong darknessFactor = new AtomicLong(1000);
 
-    private final ToasterAppConfig toasterAppConfig;
-
-    public OpendaylightToaster() {
-        this(new ToasterAppConfigBuilder().setManufacturer(TOASTER_MANUFACTURER).setModelNumber(TOASTER_MODEL_NUMBER)
-                .setMaxMakeToastTries(Uint16.valueOf(2)).build());
-    }
+    private final @NonNull DisplayString manufacturer;
+    private final @NonNull DisplayString modelNumber;
+    private final int maxMakeToastTries;
 
-    public OpendaylightToaster(final ToasterAppConfig toasterAppConfig) {
+    public OpendaylightToaster(final DataBroker dataProvider,
+            final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService,
+            final String manufacturer, final String modelNumber, final int maxMakeToastTries) {
         super("OpendaylightToaster", "toaster-provider", null);
-        executor = Executors.newFixedThreadPool(1);
-        this.toasterAppConfig = toasterAppConfig;
-    }
+        notificationProvider = requireNonNull(notificationPublishService);
+        dataBroker = requireNonNull(dataProvider);
 
-    public void setNotificationProvider(final NotificationPublishService notificationPublishService) {
-        this.notificationProvider = notificationPublishService;
-    }
+        this.manufacturer = new DisplayString(manufacturer);
+        this.modelNumber = new DisplayString(modelNumber);
+        this.maxMakeToastTries = maxMakeToastTries;
 
-    public void setDataBroker(final DataBroker dataBroker) {
-        this.dataBroker = dataBroker;
-    }
+        executor = Executors.newFixedThreadPool(1);
+        reg = rpcProviderService.registerRpcImplementations(
+            (CancelToast) this::cancelToast,
+            this,
+            (RestockToaster) this::restockToaster);
 
-    public void init() {
         LOG.info("Initializing...");
 
         dataTreeChangeListenerRegistration = requireNonNull(dataBroker, "dataBroker must be set")
-            .registerDataTreeChangeListener(DataTreeIdentifier.create(CONFIGURATION, TOASTER_IID), this);
-        setToasterStatusUp(null);
+            .registerTreeChangeListener(DataTreeIdentifier.of(CONFIGURATION, TOASTER_IID), this);
+        try {
+            setToasterStatusUp(null).get();
+        } catch (InterruptedException | ExecutionException e) {
+            throw new IllegalStateException("Failed to commit initial data", e);
+        }
 
         // Register our MXBean.
         register();
     }
 
+    @Inject
+    public OpendaylightToaster(final DataBroker dataProvider,
+            final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService) {
+        this(dataProvider, notificationPublishService, rpcProviderService, TOASTER_MANUFACTURER, TOASTER_MODEL_NUMBER,
+            2);
+    }
+
+    @Activate
+    public OpendaylightToaster(@Reference final DataBroker dataProvider,
+            @Reference final NotificationPublishService notificationPublishService,
+            @Reference final RpcProviderService rpcProviderService, final @NonNull Configuration configuration) {
+        this(dataProvider, notificationPublishService, rpcProviderService, configuration.manufacturer(),
+            configuration.modelNumber(), configuration.maxMakeToastTries());
+    }
+
     /**
      * Implemented from the AutoCloseable interface.
      */
     @Override
+    @PreDestroy
+    @Deactivate
     public void close() {
         LOG.info("Closing...");
 
         // Unregister our MXBean.
         unregister();
+        reg.close();
 
         // When we close this service we need to shutdown our executor!
         executor.shutdown();
@@ -168,31 +212,37 @@ public class OpendaylightToaster extends AbstractMXBean
         // note - we are simulating a device whose manufacture and model are
         // fixed (embedded) into the hardware.
         // This is why the manufacture and model number are hardcoded.
-        return new ToasterBuilder().setToasterManufacturer(toasterAppConfig.getManufacturer())
-                .setToasterModelNumber(toasterAppConfig.getModelNumber()).setToasterStatus(status).build();
+        return new ToasterBuilder()
+            .setToasterManufacturer(manufacturer)
+            .setToasterModelNumber(modelNumber)
+            .setToasterStatus(status)
+            .build();
     }
 
     /**
      * Implemented from the DataTreeChangeListener interface.
      */
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeModification<Toaster>> changes) {
-        for (DataTreeModification<Toaster> change: changes) {
-            DataObjectModification<Toaster> rootNode = change.getRootNode();
-            if (rootNode.getModificationType() == WRITE) {
-                Toaster oldToaster = rootNode.getDataBefore();
-                Toaster newToaster = rootNode.getDataAfter();
-                LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: "
-                        + "old Toaster: {}, new Toaster: {}", change.getRootPath().getRootIdentifier(),
-                        oldToaster, newToaster);
-
-                Uint32 darkness = newToaster.getDarknessFactor();
-                if (darkness != null) {
-                    darknessFactor.set(darkness.toJava());
+    public void onDataTreeChanged(final List<DataTreeModification<Toaster>> changes) {
+        for (var change: changes) {
+            final var rootNode = change.getRootNode();
+            switch (rootNode.modificationType()) {
+                case WRITE -> {
+                    final var oldToaster = rootNode.dataBefore();
+                    final var newToaster = rootNode.dataAfter();
+                    LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: old Toaster: {}, "
+                        + "new Toaster: {}", change.getRootPath().path(), oldToaster, newToaster);
+
+                    final var darkness = newToaster.getDarknessFactor();
+                    if (darkness != null) {
+                        darknessFactor.set(darkness.toJava());
+                    }
+                }
+                case DELETE -> LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
+                        change.getRootPath().path(), rootNode.dataBefore());
+                default -> {
+                    // No-op
                 }
-            } else if (rootNode.getModificationType() == DELETE) {
-                LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
-                        change.getRootPath().getRootIdentifier(), rootNode.getDataBefore());
             }
         }
     }
@@ -200,9 +250,8 @@ public class OpendaylightToaster extends AbstractMXBean
     /**
      * RPC call implemented from the ToasterService interface that cancels the current toast, if any.
      */
-    @Override
-    public ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
-        Future<?> current = currentMakeToastTask.getAndSet(null);
+    private ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
+        final var current = currentMakeToastTask.getAndSet(null);
         if (current != null) {
             current.cancel(true);
         }
@@ -215,23 +264,20 @@ public class OpendaylightToaster extends AbstractMXBean
      * RPC call implemented from the ToasterService interface that attempts to make toast.
      */
     @Override
-    public ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final MakeToastInput input) {
+    public ListenableFuture<RpcResult<MakeToastOutput>> invoke(final MakeToastInput input) {
         LOG.info("makeToast: {}", input);
-
-        final SettableFuture<RpcResult<MakeToastOutput>> futureResult = SettableFuture.create();
-
-        checkStatusAndMakeToast(input, futureResult, toasterAppConfig.getMaxMakeToastTries().toJava());
-
+        final var futureResult = SettableFuture.<RpcResult<MakeToastOutput>>create();
+        checkStatusAndMakeToast(input, futureResult, maxMakeToastTries);
         return futureResult;
     }
 
     private static RpcError makeToasterOutOfBreadError() {
-        return RpcResultBuilder.newError(APPLICATION, "resource-denied", "Toaster is out of bread", "out-of-stock",
-                null, null);
+        return RpcResultBuilder.newError(APPLICATION, ErrorTag.RESOURCE_DENIED, "Toaster is out of bread",
+            "out-of-stock", null, null);
     }
 
     private static RpcError makeToasterInUseError() {
-        return RpcResultBuilder.newWarning(APPLICATION, "in-use", "Toaster is busy", null, null, null);
+        return RpcResultBuilder.newWarning(APPLICATION, ErrorTag.IN_USE, "Toaster is busy", null, null, null);
     }
 
     private void checkStatusAndMakeToast(final MakeToastInput input,
@@ -246,7 +292,7 @@ public class OpendaylightToaster extends AbstractMXBean
             Futures.transformAsync(readFuture, toasterData -> {
                 ToasterStatus toasterStatus = ToasterStatus.Up;
                 if (toasterData.isPresent()) {
-                    toasterStatus = toasterData.get().getToasterStatus();
+                    toasterStatus = toasterData.orElseThrow().getToasterStatus();
                 }
 
                 LOG.debug("Read toaster status: {}", toasterStatus);
@@ -321,8 +367,7 @@ public class OpendaylightToaster extends AbstractMXBean
      * Restocks the bread for the toaster, resets the toastsMade counter to 0, and sends a
      * ToasterRestocked notification.
      */
-    @Override
-    public ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
+    private ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
         LOG.info("restockToaster: {}", input);
 
         amountOfBreadInStock.set(input.getAmountOfBreadToStock().toJava());
@@ -353,11 +398,12 @@ public class OpendaylightToaster extends AbstractMXBean
         return toastsMade.get();
     }
 
-    private void setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
+    private ListenableFuture<?> setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
         WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
         tx.put(OPERATIONAL,TOASTER_IID, buildToaster(ToasterStatus.Up));
 
-        Futures.addCallback(tx.commit(), new FutureCallback<CommitInfo>() {
+        final var future = tx.commit();
+        Futures.addCallback(future, new FutureCallback<CommitInfo>() {
             @Override
             public void onSuccess(final CommitInfo result) {
                 LOG.info("Successfully set ToasterStatus to Up");
@@ -379,6 +425,8 @@ public class OpendaylightToaster extends AbstractMXBean
                 }
             }
         }, MoreExecutors.directExecutor());
+
+        return future;
     }
 
     private boolean outOfBread() {
@@ -400,7 +448,7 @@ public class OpendaylightToaster extends AbstractMXBean
         public Void call() {
             try {
                 // make toast just sleeps for n seconds per doneness level.
-                Thread.sleep(OpendaylightToaster.this.darknessFactor.get()
+                Thread.sleep(darknessFactor.get()
                         * toastRequest.getToasterDoneness().toJava());
 
             } catch (InterruptedException e) {
diff --git a/opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml b/opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml
deleted file mode 100644
index 4c7af5b..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="true" odl:use-default-for-reference-types="true">
-
-  <!-- "restart-dependents-on-updates" is an ODL extension attribute that processes any "property-placeholder"
-       elements and reacts to updates to the corresponding cfg file by restarting this blueprint container any
-       dependent containers that consume OSGi services provided by this container in an atomic and orderly
-       manner.
-
-       "use-default-for-reference-types" is an ODL extension attribute that adds a filter to all services
-       imported via "reference" elements where the "type" property is either not set or set to "default" if
-       the odl:type attribute isn't explicitly specified. This ensures the default implementation is imported
-       if there are other implementations advertised with other types.
-  -->
-
-  <!-- Accesses properties via the etc/org.opendaylight.toaster.cfg file. The properties are made available
-       as variables that can be referenced. The variables are substituted with the actual values read from
-       the cfg file, if present, or the default-properties.
-   -->
-  <cm:property-placeholder persistent-id="org.opendaylight.toaster" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="databroker-type" value="default"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <!-- "clustered-app-config" is an ODL extension that obtains an application configuration yang container
-       from the MD-SAL data store and makes the binding DataObject available as a bean that can be injected
-       into other beans. Here we obtain the ToasterAppConfig container DataObject. This also shows how to
-       specify default data via the "default-config" child element. While default leaf values defined in the
-       yang are returned, one may have more complex data, eg lists, that require default data. The
-       "default-config" must contain the XML representation of the yang data, including namespace, wrapped
-       in a CDATA section to prevent the blueprint container from treating it as markup.
-  -->
-  <odl:clustered-app-config id="toasterAppConfig"
-      binding-class="org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig">
-    <odl:default-config><![CDATA[
-      <toaster-app-config xmlns="urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config">
-        <max-make-toast-tries>3</max-make-toast-tries>
-      </toaster-app-config>
-    ]]></odl:default-config>
-  </odl:clustered-app-config>
-
-  <!-- Import MD-SAL services. For the DataBroker, we explicitly specify the odl:type which is configurable
-       via the cfg file. In this manner the toaster can be configured to use the default clustered DataBroker
-       or the specialized "pingpong" DataBroker (or any other DataBroker implementation).
-   -->
-  <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker" odl:type="${databroker-type}" />
-  <reference id="notificationService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService"/>
-
-  <!-- Create the OpendaylightToaster instance and inject its dependencies -->
-  <bean id="toaster" class="org.opendaylight.controller.sample.toaster.provider.OpendaylightToaster"
-          init-method="init" destroy-method="close">
-    <argument ref="toasterAppConfig"/>
-    <property name="dataBroker" ref="dataBroker"/>
-    <property name="notificationProvider" ref="notificationService"/>
-  </bean>
-
-  <!-- Register the OpendaylightToaster instance as an RPC implementation provider. The "rpc-implementation"
-       element automatically figures out the RpcService interface although it can be explicitly specified.
-   -->
-  <odl:rpc-implementation ref="toaster"/>
-</blueprint>
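
With this blueprint wiring removed, the toaster registers its RPCs straight from the OpendaylightToaster constructor via RpcProviderService, as shown in the Java hunk earlier in this patch. A condensed sketch of that provider-side registration, mirroring the calls above:

    // Each generated RPC interface is a single-method type, so private methods can be cast to it;
    // 'this' implements MakeToast directly. The returned Registration is closed in close().
    reg = rpcProviderService.registerRpcImplementations(
        (CancelToast) this::cancelToast,
        this,
        (RestockToaster) this::restockToaster);
    // and on shutdown:
    reg.close();
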
diff --git a/opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang b/opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang
deleted file mode 100644
index 369ba46..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-module toaster-app-config {
-    yang-version 1;
-
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config";
-    prefix toaster-app-config;
-
-    import toaster { prefix toaster; revision-date 2009-11-20; }
-
-    description
-      "Configuration for the Opendaylight toaster application.";
-
-    revision "2016-05-03" {
-        description
-            "Initial revision.";
-    }
-
-    container toaster-app-config {
-        leaf manufacturer {
-            type toaster:DisplayString;
-            default "Opendaylight";
-        }
-
-        leaf model-number {
-            type toaster:DisplayString;
-            default "Model 1 - Binding Aware";
-        }
-
-        leaf max-make-toast-tries {
-            type uint16;
-            default 2;
-        }
-    }
-}
\ No newline at end of file
index 1b6dc3cc2c1413fae485dd08f0c3d1bfde80e0e4..86d2e6d19c249dad322f0f8a94d5ecbd2b546264 100644
@@ -22,6 +22,7 @@ import org.junit.Test;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.ReadTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.dom.adapter.test.AbstractConcurrentDataBrokerTest;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
@@ -41,44 +42,43 @@ public class OpenDaylightToasterTest extends AbstractConcurrentDataBrokerTest {
 
     @Before
     public void setupToaster() {
-        toaster = new OpendaylightToaster();
-        toaster.setDataBroker(getDataBroker());
-        toaster.init();
-
-        // We'll mock the NotificationProviderService.
-        NotificationPublishService mockNotification = mock(NotificationPublishService.class);
-        toaster.setNotificationProvider(mockNotification);
+        toaster = new OpendaylightToaster(getDataBroker(), mock(NotificationPublishService.class),
+            mock(RpcProviderService.class));
     }
 
     @Test
     public void testToasterInitOnStartUp() throws Exception {
         DataBroker broker = getDataBroker();
 
-        ReadTransaction readTx = broker.newReadOnlyTransaction();
-        Optional<Toaster> optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+        Optional<Toaster> optional;
+        try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+            optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+        }
         assertNotNull(optional);
         assertTrue("Operational toaster not present", optional.isPresent());
 
-        Toaster toasterData = optional.get();
+        Toaster toasterData = optional.orElseThrow();
 
         assertEquals(Toaster.ToasterStatus.Up, toasterData.getToasterStatus());
         assertEquals(new DisplayString("Opendaylight"), toasterData.getToasterManufacturer());
         assertEquals(new DisplayString("Model 1 - Binding Aware"), toasterData.getToasterModelNumber());
 
-        Optional<Toaster> configToaster = readTx.read(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
-        assertFalse("Didn't expect config data for toaster.", configToaster.isPresent());
+        try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+            Boolean configToaster = readTx.exists(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
+            assertFalse("Didn't expect config data for toaster.", configToaster);
+        }
     }
 
     @Test
     @Ignore //ignored because it is not a test right now. Illustrative purposes only.
     public void testSomething() throws Exception {
         MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(Uint32.valueOf(1))
-                .setToasterToastType(WheatBread.class).build();
+                .setToasterToastType(WheatBread.VALUE).build();
 
         // NOTE: In a real test we would want to override the Thread.sleep() to
         // prevent our junit test
         // for sleeping for a second...
-        Future<RpcResult<MakeToastOutput>> makeToast = toaster.makeToast(toastInput);
+        Future<RpcResult<MakeToastOutput>> makeToast = toaster.invoke(toastInput);
 
         RpcResult<MakeToastOutput> rpcResult = makeToast.get();
 
index 8980aa6ef5df68f740cb0f75330bf750f8e9702e..57230fa79d95283a8ad263e7bd944e34032de4f9 100644
@@ -5,7 +5,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>5.0.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
diff --git a/pom.xml b/pom.xml
index 803fd34bc643fef58f94f4195c77b5ba53199b3c..7b3c5253d3436995679581f4a81f90b0ef037cf9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,13 +4,13 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>9.0.12</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>releasepom</artifactId>
-  <version>5.0.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>controller</name>
   <!-- Used by Sonar to set project name -->
     <module>karaf</module>
 
     <module>akka</module>
+    <module>atomix-storage</module>
     <module>bundle-parent</module>
     <module>benchmark</module>
     <module>jolokia</module>
 
-    <!-- md-sal -->
-    <module>opendaylight/md-sal</module>
-    <!-- config -->
-    <module>opendaylight/config</module>
-
     <module>opendaylight/blueprint</module>
+    <module>opendaylight/md-sal</module>
   </modules>
 
   <profiles>